diff --git a/.github/.OwlBot.lock.yaml b/.github/.OwlBot.lock.yaml index 3815c983cb..cde2929799 100644 --- a/.github/.OwlBot.lock.yaml +++ b/.github/.OwlBot.lock.yaml @@ -13,4 +13,4 @@ # limitations under the License. docker: image: gcr.io/cloud-devrel-public-resources/owlbot-python:latest - digest: sha256:7a40313731a7cb1454eef6b33d3446ebb121836738dc3ab3d2d3ded5268c35b6 + digest: sha256:ddf4551385d566771dc713090feb7b4c1164fb8a698fe52bbe7670b24236565b diff --git a/.kokoro/docker/docs/Dockerfile b/.kokoro/docker/docs/Dockerfile index 238b87b9d1..f8137d0ae4 100644 --- a/.kokoro/docker/docs/Dockerfile +++ b/.kokoro/docker/docs/Dockerfile @@ -60,16 +60,16 @@ RUN apt-get update \ && rm -rf /var/lib/apt/lists/* \ && rm -f /var/cache/apt/archives/*.deb -###################### Install python 3.8.11 +###################### Install python 3.9.13 -# Download python 3.8.11 -RUN wget https://www.python.org/ftp/python/3.8.11/Python-3.8.11.tgz +# Download python 3.9.13 +RUN wget https://www.python.org/ftp/python/3.9.13/Python-3.9.13.tgz # Extract files -RUN tar -xvf Python-3.8.11.tgz +RUN tar -xvf Python-3.9.13.tgz -# Install python 3.8.11 -RUN ./Python-3.8.11/configure --enable-optimizations +# Install python 3.9.13 +RUN ./Python-3.9.13/configure --enable-optimizations RUN make altinstall ###################### Install pip diff --git a/.kokoro/requirements.in b/.kokoro/requirements.in index 7718391a34..ec867d9fd6 100644 --- a/.kokoro/requirements.in +++ b/.kokoro/requirements.in @@ -1,8 +1,10 @@ gcp-docuploader -gcp-releasetool +gcp-releasetool>=1.10.5 # required for compatibility with cryptography>=39.x importlib-metadata typing-extensions twine wheel setuptools -nox \ No newline at end of file +nox>=2022.11.21 # required to remove dependency on py +charset-normalizer<3 +click<8.1.0 diff --git a/.kokoro/requirements.txt b/.kokoro/requirements.txt index d15994bac9..c7929db6d1 100644 --- a/.kokoro/requirements.txt +++ b/.kokoro/requirements.txt @@ -1,6 +1,6 @@ # -# This file is autogenerated by pip-compile with python 3.10 -# To update, run: +# This file is autogenerated by pip-compile with Python 3.9 +# by the following command: # # pip-compile --allow-unsafe --generate-hashes requirements.in # @@ -20,9 +20,9 @@ cachetools==5.2.0 \ --hash=sha256:6a94c6402995a99c3970cc7e4884bb60b4a8639938157eeed436098bf9831757 \ --hash=sha256:f9f17d2aec496a9aa6b76f53e3b614c965223c061982d434d160f930c698a9db # via google-auth -certifi==2022.6.15 \ - --hash=sha256:84c85a9078b11105f04f3036a9482ae10e4621616db313fe045dd24743a0820d \ - --hash=sha256:fe86415d55e84719d75f8b69414f6438ac3547d2078ab91b67e779ef69378412 +certifi==2022.12.7 \ + --hash=sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3 \ + --hash=sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18 # via requests cffi==1.15.1 \ --hash=sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5 \ @@ -93,11 +93,14 @@ cffi==1.15.1 \ charset-normalizer==2.1.1 \ --hash=sha256:5a3d016c7c547f69d6f81fb0db9449ce888b418b5b9952cc5e6e66843e9dd845 \ --hash=sha256:83e9a75d1911279afd89352c68b45348559d1fc0506b054b346651b5e7fee29f - # via requests + # via + # -r requirements.in + # requests click==8.0.4 \ --hash=sha256:6a7a62563bbfabfda3a38f3023a1db4a35978c0abd76f6c9605ecd6554d6d9b1 \ --hash=sha256:8458d7b1287c5fb128c90e23381cf99dcde74beaf6c7ff6384ce84d6fe090adb # via + # -r requirements.in # gcp-docuploader # gcp-releasetool colorlog==6.7.0 \ @@ -110,29 +113,26 @@ commonmark==0.9.1 \ 
--hash=sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60 \ --hash=sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9 # via rich -cryptography==37.0.4 \ - --hash=sha256:190f82f3e87033821828f60787cfa42bff98404483577b591429ed99bed39d59 \ - --hash=sha256:2be53f9f5505673eeda5f2736bea736c40f051a739bfae2f92d18aed1eb54596 \ - --hash=sha256:30788e070800fec9bbcf9faa71ea6d8068f5136f60029759fd8c3efec3c9dcb3 \ - --hash=sha256:3d41b965b3380f10e4611dbae366f6dc3cefc7c9ac4e8842a806b9672ae9add5 \ - --hash=sha256:4c590ec31550a724ef893c50f9a97a0c14e9c851c85621c5650d699a7b88f7ab \ - --hash=sha256:549153378611c0cca1042f20fd9c5030d37a72f634c9326e225c9f666d472884 \ - --hash=sha256:63f9c17c0e2474ccbebc9302ce2f07b55b3b3fcb211ded18a42d5764f5c10a82 \ - --hash=sha256:6bc95ed67b6741b2607298f9ea4932ff157e570ef456ef7ff0ef4884a134cc4b \ - --hash=sha256:7099a8d55cd49b737ffc99c17de504f2257e3787e02abe6d1a6d136574873441 \ - --hash=sha256:75976c217f10d48a8b5a8de3d70c454c249e4b91851f6838a4e48b8f41eb71aa \ - --hash=sha256:7bc997818309f56c0038a33b8da5c0bfbb3f1f067f315f9abd6fc07ad359398d \ - --hash=sha256:80f49023dd13ba35f7c34072fa17f604d2f19bf0989f292cedf7ab5770b87a0b \ - --hash=sha256:91ce48d35f4e3d3f1d83e29ef4a9267246e6a3be51864a5b7d2247d5086fa99a \ - --hash=sha256:a958c52505c8adf0d3822703078580d2c0456dd1d27fabfb6f76fe63d2971cd6 \ - --hash=sha256:b62439d7cd1222f3da897e9a9fe53bbf5c104fff4d60893ad1355d4c14a24157 \ - --hash=sha256:b7f8dd0d4c1f21759695c05a5ec8536c12f31611541f8904083f3dc582604280 \ - --hash=sha256:d204833f3c8a33bbe11eda63a54b1aad7aa7456ed769a982f21ec599ba5fa282 \ - --hash=sha256:e007f052ed10cc316df59bc90fbb7ff7950d7e2919c9757fd42a2b8ecf8a5f67 \ - --hash=sha256:f2dcb0b3b63afb6df7fd94ec6fbddac81b5492513f7b0436210d390c14d46ee8 \ - --hash=sha256:f721d1885ecae9078c3f6bbe8a88bc0786b6e749bf32ccec1ef2b18929a05046 \ - --hash=sha256:f7a6de3e98771e183645181b3627e2563dcde3ce94a9e42a3f427d2255190327 \ - --hash=sha256:f8c0a6e9e1dd3eb0414ba320f85da6b0dcbd543126e30fcc546e7372a7fbf3b9 +cryptography==41.0.0 \ + --hash=sha256:0ddaee209d1cf1f180f1efa338a68c4621154de0afaef92b89486f5f96047c55 \ + --hash=sha256:14754bcdae909d66ff24b7b5f166d69340ccc6cb15731670435efd5719294895 \ + --hash=sha256:344c6de9f8bda3c425b3a41b319522ba3208551b70c2ae00099c205f0d9fd3be \ + --hash=sha256:34d405ea69a8b34566ba3dfb0521379b210ea5d560fafedf9f800a9a94a41928 \ + --hash=sha256:3680248309d340fda9611498a5319b0193a8dbdb73586a1acf8109d06f25b92d \ + --hash=sha256:3c5ef25d060c80d6d9f7f9892e1d41bb1c79b78ce74805b8cb4aa373cb7d5ec8 \ + --hash=sha256:4ab14d567f7bbe7f1cdff1c53d5324ed4d3fc8bd17c481b395db224fb405c237 \ + --hash=sha256:5c1f7293c31ebc72163a9a0df246f890d65f66b4a40d9ec80081969ba8c78cc9 \ + --hash=sha256:6b71f64beeea341c9b4f963b48ee3b62d62d57ba93eb120e1196b31dc1025e78 \ + --hash=sha256:7d92f0248d38faa411d17f4107fc0bce0c42cae0b0ba5415505df72d751bf62d \ + --hash=sha256:8362565b3835ceacf4dc8f3b56471a2289cf51ac80946f9087e66dc283a810e0 \ + --hash=sha256:84a165379cb9d411d58ed739e4af3396e544eac190805a54ba2e0322feb55c46 \ + --hash=sha256:88ff107f211ea696455ea8d911389f6d2b276aabf3231bf72c8853d22db755c5 \ + --hash=sha256:9f65e842cb02550fac96536edb1d17f24c0a338fd84eaf582be25926e993dde4 \ + --hash=sha256:a4fc68d1c5b951cfb72dfd54702afdbbf0fb7acdc9b7dc4301bbf2225a27714d \ + --hash=sha256:b7f2f5c525a642cecad24ee8670443ba27ac1fab81bba4cc24c7b6b41f2d0c75 \ + --hash=sha256:b846d59a8d5a9ba87e2c3d757ca019fa576793e8758174d3868aecb88d6fc8eb \ + --hash=sha256:bf8fc66012ca857d62f6a347007e166ed59c0bc150cefa49f28376ebe7d992a2 \ + 
--hash=sha256:f5d0bf9b252f30a31664b6f64432b4730bb7038339bd18b1fafe129cfc2be9be # via # gcp-releasetool # secretstorage @@ -148,23 +148,23 @@ filelock==3.8.0 \ --hash=sha256:55447caa666f2198c5b6b13a26d2084d26fa5b115c00d065664b2124680c4edc \ --hash=sha256:617eb4e5eedc82fc5f47b6d61e4d11cb837c56cb4544e39081099fa17ad109d4 # via virtualenv -gcp-docuploader==0.6.3 \ - --hash=sha256:ba8c9d76b3bbac54b0311c503a373b00edc2dc02d6d54ea9507045adb8e870f7 \ - --hash=sha256:c0f5aaa82ce1854a386197e4e359b120ad6d4e57ae2c812fce42219a3288026b +gcp-docuploader==0.6.4 \ + --hash=sha256:01486419e24633af78fd0167db74a2763974765ee8078ca6eb6964d0ebd388af \ + --hash=sha256:70861190c123d907b3b067da896265ead2eeb9263969d6955c9e0bb091b5ccbf # via -r requirements.in -gcp-releasetool==1.8.7 \ - --hash=sha256:3d2a67c9db39322194afb3b427e9cb0476ce8f2a04033695f0aeb63979fc2b37 \ - --hash=sha256:5e4d28f66e90780d77f3ecf1e9155852b0c3b13cbccb08ab07e66b2357c8da8d +gcp-releasetool==1.10.5 \ + --hash=sha256:174b7b102d704b254f2a26a3eda2c684fd3543320ec239baf771542a2e58e109 \ + --hash=sha256:e29d29927fe2ca493105a82958c6873bb2b90d503acac56be2c229e74de0eec9 # via -r requirements.in -google-api-core==2.8.2 \ - --hash=sha256:06f7244c640322b508b125903bb5701bebabce8832f85aba9335ec00b3d02edc \ - --hash=sha256:93c6a91ccac79079ac6bbf8b74ee75db970cc899278b97d53bc012f35908cf50 +google-api-core==2.10.2 \ + --hash=sha256:10c06f7739fe57781f87523375e8e1a3a4674bf6392cd6131a3222182b971320 \ + --hash=sha256:34f24bd1d5f72a8c4519773d99ca6bf080a6c4e041b4e9f024fe230191dda62e # via # google-cloud-core # google-cloud-storage -google-auth==2.11.0 \ - --hash=sha256:be62acaae38d0049c21ca90f27a23847245c9f161ff54ede13af2cb6afecbac9 \ - --hash=sha256:ed65ecf9f681832298e29328e1ef0a3676e3732b2e56f41532d45f70a22de0fb +google-auth==2.14.1 \ + --hash=sha256:ccaa901f31ad5cbb562615eb8b664b3dd0bf5404a67618e642307f00613eda4d \ + --hash=sha256:f5d8701633bebc12e0deea4df8abd8aff31c28b355360597f7f2ee60f2e4d016 # via # gcp-releasetool # google-api-core @@ -174,76 +174,102 @@ google-cloud-core==2.3.2 \ --hash=sha256:8417acf6466be2fa85123441696c4badda48db314c607cf1e5d543fa8bdc22fe \ --hash=sha256:b9529ee7047fd8d4bf4a2182de619154240df17fbe60ead399078c1ae152af9a # via google-cloud-storage -google-cloud-storage==2.5.0 \ - --hash=sha256:19a26c66c317ce542cea0830b7e787e8dac2588b6bfa4d3fd3b871ba16305ab0 \ - --hash=sha256:382f34b91de2212e3c2e7b40ec079d27ee2e3dbbae99b75b1bcd8c63063ce235 +google-cloud-storage==2.6.0 \ + --hash=sha256:104ca28ae61243b637f2f01455cc8a05e8f15a2a18ced96cb587241cdd3820f5 \ + --hash=sha256:4ad0415ff61abdd8bb2ae81c1f8f7ec7d91a1011613f2db87c614c550f97bfe9 # via gcp-docuploader -google-crc32c==1.3.0 \ - --hash=sha256:04e7c220798a72fd0f08242bc8d7a05986b2a08a0573396187fd32c1dcdd58b3 \ - --hash=sha256:05340b60bf05b574159e9bd940152a47d38af3fb43803ffe71f11d704b7696a6 \ - --hash=sha256:12674a4c3b56b706153a358eaa1018c4137a5a04635b92b4652440d3d7386206 \ - --hash=sha256:127f9cc3ac41b6a859bd9dc4321097b1a4f6aa7fdf71b4f9227b9e3ebffb4422 \ - --hash=sha256:13af315c3a0eec8bb8b8d80b8b128cb3fcd17d7e4edafc39647846345a3f003a \ - --hash=sha256:1926fd8de0acb9d15ee757175ce7242e235482a783cd4ec711cc999fc103c24e \ - --hash=sha256:226f2f9b8e128a6ca6a9af9b9e8384f7b53a801907425c9a292553a3a7218ce0 \ - --hash=sha256:276de6273eb074a35bc598f8efbc00c7869c5cf2e29c90748fccc8c898c244df \ - --hash=sha256:318f73f5484b5671f0c7f5f63741ab020a599504ed81d209b5c7129ee4667407 \ - --hash=sha256:3bbce1be3687bbfebe29abdb7631b83e6b25da3f4e1856a1611eb21854b689ea \ - 
--hash=sha256:42ae4781333e331a1743445931b08ebdad73e188fd554259e772556fc4937c48 \ - --hash=sha256:58be56ae0529c664cc04a9c76e68bb92b091e0194d6e3c50bea7e0f266f73713 \ - --hash=sha256:5da2c81575cc3ccf05d9830f9e8d3c70954819ca9a63828210498c0774fda1a3 \ - --hash=sha256:6311853aa2bba4064d0c28ca54e7b50c4d48e3de04f6770f6c60ebda1e975267 \ - --hash=sha256:650e2917660e696041ab3dcd7abac160b4121cd9a484c08406f24c5964099829 \ - --hash=sha256:6a4db36f9721fdf391646685ecffa404eb986cbe007a3289499020daf72e88a2 \ - --hash=sha256:779cbf1ce375b96111db98fca913c1f5ec11b1d870e529b1dc7354b2681a8c3a \ - --hash=sha256:7f6fe42536d9dcd3e2ffb9d3053f5d05221ae3bbcefbe472bdf2c71c793e3183 \ - --hash=sha256:891f712ce54e0d631370e1f4997b3f182f3368179198efc30d477c75d1f44942 \ - --hash=sha256:95c68a4b9b7828ba0428f8f7e3109c5d476ca44996ed9a5f8aac6269296e2d59 \ - --hash=sha256:96a8918a78d5d64e07c8ea4ed2bc44354e3f93f46a4866a40e8db934e4c0d74b \ - --hash=sha256:9c3cf890c3c0ecfe1510a452a165431b5831e24160c5fcf2071f0f85ca5a47cd \ - --hash=sha256:9f58099ad7affc0754ae42e6d87443299f15d739b0ce03c76f515153a5cda06c \ - --hash=sha256:a0b9e622c3b2b8d0ce32f77eba617ab0d6768b82836391e4f8f9e2074582bf02 \ - --hash=sha256:a7f9cbea4245ee36190f85fe1814e2d7b1e5f2186381b082f5d59f99b7f11328 \ - --hash=sha256:bab4aebd525218bab4ee615786c4581952eadc16b1ff031813a2fd51f0cc7b08 \ - --hash=sha256:c124b8c8779bf2d35d9b721e52d4adb41c9bfbde45e6a3f25f0820caa9aba73f \ - --hash=sha256:c9da0a39b53d2fab3e5467329ed50e951eb91386e9d0d5b12daf593973c3b168 \ - --hash=sha256:ca60076c388728d3b6ac3846842474f4250c91efbfe5afa872d3ffd69dd4b318 \ - --hash=sha256:cb6994fff247987c66a8a4e550ef374671c2b82e3c0d2115e689d21e511a652d \ - --hash=sha256:d1c1d6236feab51200272d79b3d3e0f12cf2cbb12b208c835b175a21efdb0a73 \ - --hash=sha256:dd7760a88a8d3d705ff562aa93f8445ead54f58fd482e4f9e2bafb7e177375d4 \ - --hash=sha256:dda4d8a3bb0b50f540f6ff4b6033f3a74e8bf0bd5320b70fab2c03e512a62812 \ - --hash=sha256:e0f1ff55dde0ebcfbef027edc21f71c205845585fffe30d4ec4979416613e9b3 \ - --hash=sha256:e7a539b9be7b9c00f11ef16b55486141bc2cdb0c54762f84e3c6fc091917436d \ - --hash=sha256:eb0b14523758e37802f27b7f8cd973f5f3d33be7613952c0df904b68c4842f0e \ - --hash=sha256:ed447680ff21c14aaceb6a9f99a5f639f583ccfe4ce1a5e1d48eb41c3d6b3217 \ - --hash=sha256:f52a4ad2568314ee713715b1e2d79ab55fab11e8b304fd1462ff5cccf4264b3e \ - --hash=sha256:fbd60c6aaa07c31d7754edbc2334aef50601b7f1ada67a96eb1eb57c7c72378f \ - --hash=sha256:fc28e0db232c62ca0c3600884933178f0825c99be4474cdd645e378a10588125 \ - --hash=sha256:fe31de3002e7b08eb20823b3735b97c86c5926dd0581c7710a680b418a8709d4 \ - --hash=sha256:fec221a051150eeddfdfcff162e6db92c65ecf46cb0f7bb1bf812a1520ec026b \ - --hash=sha256:ff71073ebf0e42258a42a0b34f2c09ec384977e7f6808999102eedd5b49920e3 +google-crc32c==1.5.0 \ + --hash=sha256:024894d9d3cfbc5943f8f230e23950cd4906b2fe004c72e29b209420a1e6b05a \ + --hash=sha256:02c65b9817512edc6a4ae7c7e987fea799d2e0ee40c53ec573a692bee24de876 \ + --hash=sha256:02ebb8bf46c13e36998aeaad1de9b48f4caf545e91d14041270d9dca767b780c \ + --hash=sha256:07eb3c611ce363c51a933bf6bd7f8e3878a51d124acfc89452a75120bc436289 \ + --hash=sha256:1034d91442ead5a95b5aaef90dbfaca8633b0247d1e41621d1e9f9db88c36298 \ + --hash=sha256:116a7c3c616dd14a3de8c64a965828b197e5f2d121fedd2f8c5585c547e87b02 \ + --hash=sha256:19e0a019d2c4dcc5e598cd4a4bc7b008546b0358bd322537c74ad47a5386884f \ + --hash=sha256:1c7abdac90433b09bad6c43a43af253e688c9cfc1c86d332aed13f9a7c7f65e2 \ + --hash=sha256:1e986b206dae4476f41bcec1faa057851f3889503a70e1bdb2378d406223994a \ + 
--hash=sha256:272d3892a1e1a2dbc39cc5cde96834c236d5327e2122d3aaa19f6614531bb6eb \ + --hash=sha256:278d2ed7c16cfc075c91378c4f47924c0625f5fc84b2d50d921b18b7975bd210 \ + --hash=sha256:2ad40e31093a4af319dadf503b2467ccdc8f67c72e4bcba97f8c10cb078207b5 \ + --hash=sha256:2e920d506ec85eb4ba50cd4228c2bec05642894d4c73c59b3a2fe20346bd00ee \ + --hash=sha256:3359fc442a743e870f4588fcf5dcbc1bf929df1fad8fb9905cd94e5edb02e84c \ + --hash=sha256:37933ec6e693e51a5b07505bd05de57eee12f3e8c32b07da7e73669398e6630a \ + --hash=sha256:398af5e3ba9cf768787eef45c803ff9614cc3e22a5b2f7d7ae116df8b11e3314 \ + --hash=sha256:3b747a674c20a67343cb61d43fdd9207ce5da6a99f629c6e2541aa0e89215bcd \ + --hash=sha256:461665ff58895f508e2866824a47bdee72497b091c730071f2b7575d5762ab65 \ + --hash=sha256:4c6fdd4fccbec90cc8a01fc00773fcd5fa28db683c116ee3cb35cd5da9ef6c37 \ + --hash=sha256:5829b792bf5822fd0a6f6eb34c5f81dd074f01d570ed7f36aa101d6fc7a0a6e4 \ + --hash=sha256:596d1f98fc70232fcb6590c439f43b350cb762fb5d61ce7b0e9db4539654cc13 \ + --hash=sha256:5ae44e10a8e3407dbe138984f21e536583f2bba1be9491239f942c2464ac0894 \ + --hash=sha256:635f5d4dd18758a1fbd1049a8e8d2fee4ffed124462d837d1a02a0e009c3ab31 \ + --hash=sha256:64e52e2b3970bd891309c113b54cf0e4384762c934d5ae56e283f9a0afcd953e \ + --hash=sha256:66741ef4ee08ea0b2cc3c86916ab66b6aef03768525627fd6a1b34968b4e3709 \ + --hash=sha256:67b741654b851abafb7bc625b6d1cdd520a379074e64b6a128e3b688c3c04740 \ + --hash=sha256:6ac08d24c1f16bd2bf5eca8eaf8304812f44af5cfe5062006ec676e7e1d50afc \ + --hash=sha256:6f998db4e71b645350b9ac28a2167e6632c239963ca9da411523bb439c5c514d \ + --hash=sha256:72218785ce41b9cfd2fc1d6a017dc1ff7acfc4c17d01053265c41a2c0cc39b8c \ + --hash=sha256:74dea7751d98034887dbd821b7aae3e1d36eda111d6ca36c206c44478035709c \ + --hash=sha256:759ce4851a4bb15ecabae28f4d2e18983c244eddd767f560165563bf9aefbc8d \ + --hash=sha256:77e2fd3057c9d78e225fa0a2160f96b64a824de17840351b26825b0848022906 \ + --hash=sha256:7c074fece789b5034b9b1404a1f8208fc2d4c6ce9decdd16e8220c5a793e6f61 \ + --hash=sha256:7c42c70cd1d362284289c6273adda4c6af8039a8ae12dc451dcd61cdabb8ab57 \ + --hash=sha256:7f57f14606cd1dd0f0de396e1e53824c371e9544a822648cd76c034d209b559c \ + --hash=sha256:83c681c526a3439b5cf94f7420471705bbf96262f49a6fe546a6db5f687a3d4a \ + --hash=sha256:8485b340a6a9e76c62a7dce3c98e5f102c9219f4cfbf896a00cf48caf078d438 \ + --hash=sha256:84e6e8cd997930fc66d5bb4fde61e2b62ba19d62b7abd7a69920406f9ecca946 \ + --hash=sha256:89284716bc6a5a415d4eaa11b1726d2d60a0cd12aadf5439828353662ede9dd7 \ + --hash=sha256:8b87e1a59c38f275c0e3676fc2ab6d59eccecfd460be267ac360cc31f7bcde96 \ + --hash=sha256:8f24ed114432de109aa9fd317278518a5af2d31ac2ea6b952b2f7782b43da091 \ + --hash=sha256:98cb4d057f285bd80d8778ebc4fde6b4d509ac3f331758fb1528b733215443ae \ + --hash=sha256:998679bf62b7fb599d2878aa3ed06b9ce688b8974893e7223c60db155f26bd8d \ + --hash=sha256:9ba053c5f50430a3fcfd36f75aff9caeba0440b2d076afdb79a318d6ca245f88 \ + --hash=sha256:9c99616c853bb585301df6de07ca2cadad344fd1ada6d62bb30aec05219c45d2 \ + --hash=sha256:a1fd716e7a01f8e717490fbe2e431d2905ab8aa598b9b12f8d10abebb36b04dd \ + --hash=sha256:a2355cba1f4ad8b6988a4ca3feed5bff33f6af2d7f134852cf279c2aebfde541 \ + --hash=sha256:b1f8133c9a275df5613a451e73f36c2aea4fe13c5c8997e22cf355ebd7bd0728 \ + --hash=sha256:b8667b48e7a7ef66afba2c81e1094ef526388d35b873966d8a9a447974ed9178 \ + --hash=sha256:ba1eb1843304b1e5537e1fca632fa894d6f6deca8d6389636ee5b4797affb968 \ + --hash=sha256:be82c3c8cfb15b30f36768797a640e800513793d6ae1724aaaafe5bf86f8f346 \ + 
--hash=sha256:c02ec1c5856179f171e032a31d6f8bf84e5a75c45c33b2e20a3de353b266ebd8 \ + --hash=sha256:c672d99a345849301784604bfeaeba4db0c7aae50b95be04dd651fd2a7310b93 \ + --hash=sha256:c6c777a480337ac14f38564ac88ae82d4cd238bf293f0a22295b66eb89ffced7 \ + --hash=sha256:cae0274952c079886567f3f4f685bcaf5708f0a23a5f5216fdab71f81a6c0273 \ + --hash=sha256:cd67cf24a553339d5062eff51013780a00d6f97a39ca062781d06b3a73b15462 \ + --hash=sha256:d3515f198eaa2f0ed49f8819d5732d70698c3fa37384146079b3799b97667a94 \ + --hash=sha256:d5280312b9af0976231f9e317c20e4a61cd2f9629b7bfea6a693d1878a264ebd \ + --hash=sha256:de06adc872bcd8c2a4e0dc51250e9e65ef2ca91be023b9d13ebd67c2ba552e1e \ + --hash=sha256:e1674e4307fa3024fc897ca774e9c7562c957af85df55efe2988ed9056dc4e57 \ + --hash=sha256:e2096eddb4e7c7bdae4bd69ad364e55e07b8316653234a56552d9c988bd2d61b \ + --hash=sha256:e560628513ed34759456a416bf86b54b2476c59144a9138165c9a1575801d0d9 \ + --hash=sha256:edfedb64740750e1a3b16152620220f51d58ff1b4abceb339ca92e934775c27a \ + --hash=sha256:f13cae8cc389a440def0c8c52057f37359014ccbc9dc1f0827936bcd367c6100 \ + --hash=sha256:f314013e7dcd5cf45ab1945d92e713eec788166262ae8deb2cfacd53def27325 \ + --hash=sha256:f583edb943cf2e09c60441b910d6a20b4d9d626c75a36c8fcac01a6c96c01183 \ + --hash=sha256:fd8536e902db7e365f49e7d9029283403974ccf29b13fc7028b97e2295b33556 \ + --hash=sha256:fe70e325aa68fa4b5edf7d1a4b6f691eb04bbccac0ace68e34820d283b5f80d4 # via google-resumable-media -google-resumable-media==2.3.3 \ - --hash=sha256:27c52620bd364d1c8116eaac4ea2afcbfb81ae9139fb3199652fcac1724bfb6c \ - --hash=sha256:5b52774ea7a829a8cdaa8bd2d4c3d4bc660c91b30857ab2668d0eb830f4ea8c5 +google-resumable-media==2.4.0 \ + --hash=sha256:2aa004c16d295c8f6c33b2b4788ba59d366677c0a25ae7382436cb30f776deaa \ + --hash=sha256:8d5518502f92b9ecc84ac46779bd4f09694ecb3ba38a3e7ca737a86d15cbca1f # via google-cloud-storage -googleapis-common-protos==1.56.4 \ - --hash=sha256:8eb2cbc91b69feaf23e32452a7ae60e791e09967d81d4fcc7fc388182d1bd394 \ - --hash=sha256:c25873c47279387cfdcbdafa36149887901d36202cb645a0e4f29686bf6e4417 +googleapis-common-protos==1.57.0 \ + --hash=sha256:27a849d6205838fb6cc3c1c21cb9800707a661bb21c6ce7fb13e99eb1f8a0c46 \ + --hash=sha256:a9f4a1d7f6d9809657b7f1316a1aa527f6664891531bcfcc13b6696e685f443c # via google-api-core -idna==3.3 \ - --hash=sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff \ - --hash=sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d +idna==3.4 \ + --hash=sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4 \ + --hash=sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2 # via requests -importlib-metadata==4.12.0 \ - --hash=sha256:637245b8bab2b6502fcbc752cc4b7a6f6243bb02b31c5c26156ad103d3d45670 \ - --hash=sha256:7401a975809ea1fdc658c3aa4f78cc2195a0e019c5cbc4c06122884e9ae80c23 +importlib-metadata==5.0.0 \ + --hash=sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab \ + --hash=sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43 # via # -r requirements.in + # keyring # twine -jaraco-classes==3.2.2 \ - --hash=sha256:6745f113b0b588239ceb49532aa09c3ebb947433ce311ef2f8e3ad64ebb74594 \ - --hash=sha256:e6ef6fd3fcf4579a7a019d87d1e56a883f4e4c35cfe925f86731abc58804e647 +jaraco-classes==3.2.3 \ + --hash=sha256:2353de3288bc6b82120752201c6b1c1a14b058267fa424ed5ce5984e3b922158 \ + --hash=sha256:89559fa5c1d3c34eff6f631ad80bb21f378dbcbb35dd161fd2c6b93f5be2f98a # via keyring jeepney==0.8.0 \ 
--hash=sha256:5efe48d255973902f6badc3ce55e2aa6c5c3b3bc642059ef3a91247bcfcc5806 \ @@ -255,9 +281,9 @@ jinja2==3.1.2 \ --hash=sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852 \ --hash=sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61 # via gcp-releasetool -keyring==23.9.0 \ - --hash=sha256:4c32a31174faaee48f43a7e2c7e9c3216ec5e95acf22a2bebfb4a1d05056ee44 \ - --hash=sha256:98f060ec95ada2ab910c195a2d4317be6ef87936a766b239c46aa3c7aac4f0db +keyring==23.11.0 \ + --hash=sha256:3dd30011d555f1345dec2c262f0153f2f0ca6bca041fb1dc4588349bb4c0ac1e \ + --hash=sha256:ad192263e2cdd5f12875dedc2da13534359a7e760e77f8d04b50968a821c2361 # via # gcp-releasetool # twine @@ -303,13 +329,13 @@ markupsafe==2.1.1 \ --hash=sha256:f121a1420d4e173a5d96e47e9a0c0dcff965afdf1626d28de1460815f7c4ee7a \ --hash=sha256:fc7b548b17d238737688817ab67deebb30e8073c95749d55538ed473130ec0c7 # via jinja2 -more-itertools==8.14.0 \ - --hash=sha256:1bc4f91ee5b1b31ac7ceacc17c09befe6a40a503907baf9c839c229b5095cfd2 \ - --hash=sha256:c09443cd3d5438b8dafccd867a6bc1cb0894389e90cb53d227456b0b0bccb750 +more-itertools==9.0.0 \ + --hash=sha256:250e83d7e81d0c87ca6bd942e6aeab8cc9daa6096d12c5308f3f92fa5e5c1f41 \ + --hash=sha256:5a6257e40878ef0520b1803990e3e22303a41b5714006c32a3fd8304b26ea1ab # via jaraco-classes -nox==2022.8.7 \ - --hash=sha256:1b894940551dc5c389f9271d197ca5d655d40bdc6ccf93ed6880e4042760a34b \ - --hash=sha256:96cca88779e08282a699d672258ec01eb7c792d35bbbf538c723172bce23212c +nox==2022.11.21 \ + --hash=sha256:0e41a990e290e274cb205a976c4c97ee3c5234441a8132c8c3fd9ea3c22149eb \ + --hash=sha256:e21c31de0711d1274ca585a2c5fde36b1aa962005ba8e9322bf5eeed16dcd684 # via -r requirements.in packaging==21.3 \ --hash=sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb \ @@ -321,42 +347,37 @@ pkginfo==1.8.3 \ --hash=sha256:848865108ec99d4901b2f7e84058b6e7660aae8ae10164e015a6dcf5b242a594 \ --hash=sha256:a84da4318dd86f870a9447a8c98340aa06216bfc6f2b7bdc4b8766984ae1867c # via twine -platformdirs==2.5.2 \ - --hash=sha256:027d8e83a2d7de06bbac4e5ef7e023c02b863d7ea5d079477e722bb41ab25788 \ - --hash=sha256:58c8abb07dcb441e6ee4b11d8df0ac856038f944ab98b7be6b27b2a3c7feef19 +platformdirs==2.5.4 \ + --hash=sha256:1006647646d80f16130f052404c6b901e80ee4ed6bef6792e1f238a8969106f7 \ + --hash=sha256:af0276409f9a02373d540bf8480021a048711d572745aef4b7842dad245eba10 # via virtualenv -protobuf==3.20.2 \ - --hash=sha256:03d76b7bd42ac4a6e109742a4edf81ffe26ffd87c5993126d894fe48a120396a \ - --hash=sha256:09e25909c4297d71d97612f04f41cea8fa8510096864f2835ad2f3b3df5a5559 \ - --hash=sha256:18e34a10ae10d458b027d7638a599c964b030c1739ebd035a1dfc0e22baa3bfe \ - --hash=sha256:291fb4307094bf5ccc29f424b42268640e00d5240bf0d9b86bf3079f7576474d \ - --hash=sha256:2c0b040d0b5d5d207936ca2d02f00f765906622c07d3fa19c23a16a8ca71873f \ - --hash=sha256:384164994727f274cc34b8abd41a9e7e0562801361ee77437099ff6dfedd024b \ - --hash=sha256:3cb608e5a0eb61b8e00fe641d9f0282cd0eedb603be372f91f163cbfbca0ded0 \ - --hash=sha256:5d9402bf27d11e37801d1743eada54372f986a372ec9679673bfcc5c60441151 \ - --hash=sha256:712dca319eee507a1e7df3591e639a2b112a2f4a62d40fe7832a16fd19151750 \ - --hash=sha256:7a5037af4e76c975b88c3becdf53922b5ffa3f2cddf657574a4920a3b33b80f3 \ - --hash=sha256:8228e56a865c27163d5d1d1771d94b98194aa6917bcfb6ce139cbfa8e3c27334 \ - --hash=sha256:84a1544252a933ef07bb0b5ef13afe7c36232a774affa673fc3636f7cee1db6c \ - --hash=sha256:84fe5953b18a383fd4495d375fe16e1e55e0a3afe7b4f7b4d01a3a0649fcda9d \ - 
--hash=sha256:9c673c8bfdf52f903081816b9e0e612186684f4eb4c17eeb729133022d6032e3 \ - --hash=sha256:9f876a69ca55aed879b43c295a328970306e8e80a263ec91cf6e9189243c613b \ - --hash=sha256:a9e5ae5a8e8985c67e8944c23035a0dff2c26b0f5070b2f55b217a1c33bbe8b1 \ - --hash=sha256:b4fdb29c5a7406e3f7ef176b2a7079baa68b5b854f364c21abe327bbeec01cdb \ - --hash=sha256:c184485e0dfba4dfd451c3bd348c2e685d6523543a0f91b9fd4ae90eb09e8422 \ - --hash=sha256:c9cdf251c582c16fd6a9f5e95836c90828d51b0069ad22f463761d27c6c19019 \ - --hash=sha256:e39cf61bb8582bda88cdfebc0db163b774e7e03364bbf9ce1ead13863e81e359 \ - --hash=sha256:e8fbc522303e09036c752a0afcc5c0603e917222d8bedc02813fd73b4b4ed804 \ - --hash=sha256:f34464ab1207114e73bba0794d1257c150a2b89b7a9faf504e00af7c9fd58978 \ - --hash=sha256:f52dabc96ca99ebd2169dadbe018824ebda08a795c7684a0b7d203a290f3adb0 +protobuf==3.20.3 \ + --hash=sha256:03038ac1cfbc41aa21f6afcbcd357281d7521b4157926f30ebecc8d4ea59dcb7 \ + --hash=sha256:28545383d61f55b57cf4df63eebd9827754fd2dc25f80c5253f9184235db242c \ + --hash=sha256:2e3427429c9cffebf259491be0af70189607f365c2f41c7c3764af6f337105f2 \ + --hash=sha256:398a9e0c3eaceb34ec1aee71894ca3299605fa8e761544934378bbc6c97de23b \ + --hash=sha256:44246bab5dd4b7fbd3c0c80b6f16686808fab0e4aca819ade6e8d294a29c7050 \ + --hash=sha256:447d43819997825d4e71bf5769d869b968ce96848b6479397e29fc24c4a5dfe9 \ + --hash=sha256:67a3598f0a2dcbc58d02dd1928544e7d88f764b47d4a286202913f0b2801c2e7 \ + --hash=sha256:74480f79a023f90dc6e18febbf7b8bac7508420f2006fabd512013c0c238f454 \ + --hash=sha256:819559cafa1a373b7096a482b504ae8a857c89593cf3a25af743ac9ecbd23480 \ + --hash=sha256:899dc660cd599d7352d6f10d83c95df430a38b410c1b66b407a6b29265d66469 \ + --hash=sha256:8c0c984a1b8fef4086329ff8dd19ac77576b384079247c770f29cc8ce3afa06c \ + --hash=sha256:9aae4406ea63d825636cc11ffb34ad3379335803216ee3a856787bcf5ccc751e \ + --hash=sha256:a7ca6d488aa8ff7f329d4c545b2dbad8ac31464f1d8b1c87ad1346717731e4db \ + --hash=sha256:b6cc7ba72a8850621bfec987cb72623e703b7fe2b9127a161ce61e61558ad905 \ + --hash=sha256:bf01b5720be110540be4286e791db73f84a2b721072a3711efff6c324cdf074b \ + --hash=sha256:c02ce36ec760252242a33967d51c289fd0e1c0e6e5cc9397e2279177716add86 \ + --hash=sha256:d9e4432ff660d67d775c66ac42a67cf2453c27cb4d738fc22cb53b5d84c135d4 \ + --hash=sha256:daa564862dd0d39c00f8086f88700fdbe8bc717e993a21e90711acfed02f2402 \ + --hash=sha256:de78575669dddf6099a8a0f46a27e82a1783c557ccc38ee620ed8cc96d3be7d7 \ + --hash=sha256:e64857f395505ebf3d2569935506ae0dfc4a15cb80dc25261176c784662cdcc4 \ + --hash=sha256:f4bd856d702e5b0d96a00ec6b307b0f51c1982c2bf9c0052cf9019e9a544ba99 \ + --hash=sha256:f4c42102bc82a51108e449cbb32b19b180022941c727bac0cfd50170341f16ee # via # gcp-docuploader # gcp-releasetool # google-api-core -py==1.11.0 \ - --hash=sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719 \ - --hash=sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378 - # via nox pyasn1==0.4.8 \ --hash=sha256:39c7e2ec30515947ff4e87fb6f456dfc6e84857d34be479c9d4a4ba4bf46aa5d \ --hash=sha256:aef77c9fb94a3ac588e87841208bdec464471d9871bd5050a287cc9a475cd0ba @@ -377,9 +398,9 @@ pygments==2.13.0 \ # via # readme-renderer # rich -pyjwt==2.4.0 \ - --hash=sha256:72d1d253f32dbd4f5c88eaf1fdc62f3a19f676ccbadb9dbc5d07e951b2b26daf \ - --hash=sha256:d42908208c699b3b973cbeb01a969ba6a96c821eefb1c5bfe4c390c01d67abba +pyjwt==2.6.0 \ + --hash=sha256:69285c7e31fc44f68a1feb309e948e0df53259d579295e6cfe2b1792329f05fd \ + --hash=sha256:d83c3d892a77bbb74d3e1a2cfa90afaadb60945205d1095d9221f04466f64c14 # via gcp-releasetool 
pyparsing==3.0.9 \ --hash=sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb \ @@ -392,30 +413,30 @@ python-dateutil==2.8.2 \ --hash=sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86 \ --hash=sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9 # via gcp-releasetool -readme-renderer==37.0 \ - --hash=sha256:07b7ea234e03e58f77cc222e206e6abb8f4c0435becce5104794ee591f9301c5 \ - --hash=sha256:9fa416704703e509eeb900696751c908ddeb2011319d93700d8f18baff887a69 +readme-renderer==37.3 \ + --hash=sha256:cd653186dfc73055656f090f227f5cb22a046d7f71a841dfa305f55c9a513273 \ + --hash=sha256:f67a16caedfa71eef48a31b39708637a6f4664c4394801a7b0d6432d13907343 # via twine -requests==2.28.1 \ - --hash=sha256:7c5599b102feddaa661c826c56ab4fee28bfd17f5abca1ebbe3e7f19d7c97983 \ - --hash=sha256:8fefa2a1a1365bf5520aac41836fbee479da67864514bdb821f31ce07ce65349 +requests==2.31.0 \ + --hash=sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f \ + --hash=sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1 # via # gcp-releasetool # google-api-core # google-cloud-storage # requests-toolbelt # twine -requests-toolbelt==0.9.1 \ - --hash=sha256:380606e1d10dc85c3bd47bf5a6095f815ec007be7a8b69c878507068df059e6f \ - --hash=sha256:968089d4584ad4ad7c171454f0a5c6dac23971e9472521ea3b6d49d610aa6fc0 +requests-toolbelt==0.10.1 \ + --hash=sha256:18565aa58116d9951ac39baa288d3adb5b3ff975c4f25eee78555d89e8f247f7 \ + --hash=sha256:62e09f7ff5ccbda92772a29f394a49c3ad6cb181d568b1337626b2abb628a63d # via twine rfc3986==2.0.0 \ --hash=sha256:50b1502b60e289cb37883f3dfd34532b8873c7de9f49bb546641ce9cbd256ebd \ --hash=sha256:97aacf9dbd4bfd829baad6e6309fa6573aaf1be3f6fa735c8ab05e46cecb261c # via twine -rich==12.5.1 \ - --hash=sha256:2eb4e6894cde1e017976d2975ac210ef515d7548bc595ba20e195fb9628acdeb \ - --hash=sha256:63a5c5ce3673d3d5fbbf23cd87e11ab84b6b451436f1b7f19ec54b6bc36ed7ca +rich==12.6.0 \ + --hash=sha256:a4eb26484f2c82589bd9a17c73d32a010b1e29d89f1604cd9bf3a2097b81bb5e \ + --hash=sha256:ba3a3775974105c221d31141f2c116f4fd65c5ceb0698657a11e9f295ec93fd0 # via twine rsa==4.9 \ --hash=sha256:90260d9058e514786967344d0ef75fa8727eed8a7d2e43ce9f4bcf1b536174f7 \ @@ -437,9 +458,9 @@ twine==4.0.1 \ --hash=sha256:42026c18e394eac3e06693ee52010baa5313e4811d5a11050e7d48436cf41b9e \ --hash=sha256:96b1cf12f7ae611a4a40b6ae8e9570215daff0611828f5fe1f37a16255ab24a0 # via -r requirements.in -typing-extensions==4.3.0 \ - --hash=sha256:25642c956049920a5aa49edcdd6ab1e06d7e5d467fc00e0506c44ac86fbfca02 \ - --hash=sha256:e6d2677a32f47fc7eb2795db1dd15c1f34eff616bcaf2cfb5e997f854fa1c4a6 +typing-extensions==4.4.0 \ + --hash=sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa \ + --hash=sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e # via -r requirements.in urllib3==1.26.12 \ --hash=sha256:3fa96cf423e6987997fc326ae8df396db2a8b7c667747d47ddd8ecba91f4a74e \ @@ -447,25 +468,25 @@ urllib3==1.26.12 \ # via # requests # twine -virtualenv==20.16.4 \ - --hash=sha256:014f766e4134d0008dcaa1f95bafa0fb0f575795d07cae50b1bee514185d6782 \ - --hash=sha256:035ed57acce4ac35c82c9d8802202b0e71adac011a511ff650cbcf9635006a22 +virtualenv==20.16.7 \ + --hash=sha256:8691e3ff9387f743e00f6bb20f70121f5e4f596cae754531f2b3b3a1b1ac696e \ + --hash=sha256:efd66b00386fdb7dbe4822d172303f40cd05e50e01740b19ea42425cbe653e29 # via nox webencodings==0.5.1 \ --hash=sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78 \ 
--hash=sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923 # via bleach -wheel==0.37.1 \ - --hash=sha256:4bdcd7d840138086126cd09254dc6195fb4fc6f01c050a1d7236f2630db1d22a \ - --hash=sha256:e9a504e793efbca1b8e0e9cb979a249cf4a0a7b5b8c9e8b65a5e39d49529c1c4 +wheel==0.38.4 \ + --hash=sha256:965f5259b566725405b05e7cf774052044b1ed30119b5d586b2703aafe8719ac \ + --hash=sha256:b60533f3f5d530e971d6737ca6d58681ee434818fab630c83a734bb10c083ce8 # via -r requirements.in -zipp==3.8.1 \ - --hash=sha256:05b45f1ee8f807d0cc928485ca40a07cb491cf092ff587c0df9cb1fd154848d2 \ - --hash=sha256:47c40d7fe183a6f21403a199b3e4192cca5774656965b0a4988ad2f8feb5f009 +zipp==3.10.0 \ + --hash=sha256:4fcb6f278987a6605757302a6e40e896257570d11c51628968ccb2a47e80c6c1 \ + --hash=sha256:7a7262fd930bd3e36c50b9a64897aec3fafff3dfdeec9623ae22b40e93f99bb8 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: -setuptools==65.2.0 \ - --hash=sha256:7f4bc85450898a09f76ebf28b72fa25bc7111f6c7d665d514a60bba9c75ef2a9 \ - --hash=sha256:a3ca5857c89f82f5c9410e8508cb32f4872a3bafd4aa7ae122a24ca33bccc750 +setuptools==65.5.1 \ + --hash=sha256:d0b9a8433464d5800cbe05094acf5c6d52a91bfac9b52bcfc4d41382be5d5d31 \ + --hash=sha256:e197a19aa8ec9722928f2206f8de752def0e4c9fc6953527360d1c36d94ddb2f # via -r requirements.in diff --git a/.kokoro/samples/python3.11/common.cfg b/.kokoro/samples/python3.11/common.cfg new file mode 100644 index 0000000000..c870d5b2c7 --- /dev/null +++ b/.kokoro/samples/python3.11/common.cfg @@ -0,0 +1,40 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Specify which tests to run +env_vars: { + key: "RUN_TESTS_SESSION" + value: "py-3.11" +} + +# Declare build specific Cloud project. +env_vars: { + key: "BUILD_SPECIFIC_GCLOUD_PROJECT" + value: "ucaip-sample-tests" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-aiplatform/.kokoro/test-samples.sh" +} + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/python-samples-testing-docker" +} + +# Download secrets for samples +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/python-docs-samples" + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. 
+build_file: "python-aiplatform/.kokoro/trampoline_v2.sh" \ No newline at end of file diff --git a/.kokoro/samples/python3.11/continuous.cfg b/.kokoro/samples/python3.11/continuous.cfg new file mode 100644 index 0000000000..a1c8d9759c --- /dev/null +++ b/.kokoro/samples/python3.11/continuous.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.kokoro/samples/python3.11/periodic-head.cfg b/.kokoro/samples/python3.11/periodic-head.cfg new file mode 100644 index 0000000000..88d5235e34 --- /dev/null +++ b/.kokoro/samples/python3.11/periodic-head.cfg @@ -0,0 +1,11 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/python-aiplatform/.kokoro/test-samples-against-head.sh" +} diff --git a/.kokoro/samples/python3.11/periodic.cfg b/.kokoro/samples/python3.11/periodic.cfg new file mode 100644 index 0000000000..71cd1e597e --- /dev/null +++ b/.kokoro/samples/python3.11/periodic.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "False" +} diff --git a/.kokoro/samples/python3.11/presubmit.cfg b/.kokoro/samples/python3.11/presubmit.cfg new file mode 100644 index 0000000000..a1c8d9759c --- /dev/null +++ b/.kokoro/samples/python3.11/presubmit.cfg @@ -0,0 +1,6 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +env_vars: { + key: "INSTALL_LIBRARY_FROM_SOURCE" + value: "True" +} \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 46d237160f..5405cc8ff1 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -25,7 +25,7 @@ repos: rev: 22.3.0 hooks: - id: black -- repo: https://gitlab.com/pycqa/flake8 +- repo: https://github.com/pycqa/flake8 rev: 3.9.2 hooks: - id: flake8 diff --git a/.release-please-manifest.json b/.release-please-manifest.json index 1f44e4d69b..563c04b797 100644 --- a/.release-please-manifest.json +++ b/.release-please-manifest.json @@ -1,3 +1,3 @@ { - ".": "1.26.1" + ".": "1.27.0" } diff --git a/CHANGELOG.md b/CHANGELOG.md index c4c343032e..0bf633b03a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,32 @@ # Changelog +## [1.27.0](https://github.com/googleapis/python-aiplatform/compare/v1.26.1...v1.27.0) (2023-06-30) + + +### Features + +* Add submit for CustomTrainingJob and CustomContainerTrainingJob which won't block until complete. 
([d6476d0](https://github.com/googleapis/python-aiplatform/commit/d6476d0ed1440f58301d8be0b9043b13aa8d910d)) +* LLM - Added support for `learning_rate` in tuning ([c6cdd10](https://github.com/googleapis/python-aiplatform/commit/c6cdd108b5f3469340ca16bc6ef56efc55723ce3)) +* LLM - Released the Codey models to GA ([89609c9](https://github.com/googleapis/python-aiplatform/commit/89609c9007684e5cbc4b3e5c30d26c0d5539aa39)) + + +### Bug Fixes + +* Fix aiplatform.init bug by replacing experiment_name with experiment ([c60773a](https://github.com/googleapis/python-aiplatform/commit/c60773a7db8ce7a59d2cb5787dc90937776c0b8f)) +* Fix error when calling update_state() after ExperimentRun.list() ([cb255ec](https://github.com/googleapis/python-aiplatform/commit/cb255ec514995e193df7122dbf67bbd4011e6630)) +* LLM - Exported the `ChatMessage` class ([7bf7634](https://github.com/googleapis/python-aiplatform/commit/7bf7634e97dfe56c3130264eeb62a9b5d6b55cac)) +* LLM - Fixed the chat models failing due to safetyAttributes format ([459ba86](https://github.com/googleapis/python-aiplatform/commit/459ba86396ab9260fd7b28a1524c051b7ad300a5)) +* Vizier - Fixed pyvizier client study creation errors ([16299d1](https://github.com/googleapis/python-aiplatform/commit/16299d14b8f209218d6576614f773c1bcbd21d64)) + + +### Documentation + +* Fixed a docstring for _Dataset ([b68a941](https://github.com/googleapis/python-aiplatform/commit/b68a941853f9c38b0ff30f5d07cea1d7fb0700a6)) +* Fixed a docstring for TimeSeriesDataset ([a7dfce2](https://github.com/googleapis/python-aiplatform/commit/a7dfce217eebbef0877053b9c0f6f6127b556e82)) +* Populate GA LLM SDK Pydocs ([e248285](https://github.com/googleapis/python-aiplatform/commit/e248285b5da4c33a68ccd6198ce7b1d8ab20febf)) +* Update scheduled pipelines client max_run_count docstring with allowed values. ([750e161](https://github.com/googleapis/python-aiplatform/commit/750e16179e1a53bc916ae6db93cd28cfd3f911fe)) + ## [1.26.1](https://github.com/googleapis/python-aiplatform/compare/v1.26.0...v1.26.1) (2023-06-21) diff --git a/docs/vertexai/services.rst b/docs/vertexai/services.rst index bdf5234132..92ad7aacbe 100644 --- a/docs/vertexai/services.rst +++ b/docs/vertexai/services.rst @@ -6,6 +6,11 @@ Vertex AI SDK :show-inheritance: :inherited-members: +.. automodule:: vertexai.language_models + :members: + :show-inheritance: + :inherited-members: + .. automodule:: vertexai.preview.language_models :members: :show-inheritance: diff --git a/google/cloud/aiplatform/datasets/dataset.py b/google/cloud/aiplatform/datasets/dataset.py index 211e307fb4..ea7a6cc059 100644 --- a/google/cloud/aiplatform/datasets/dataset.py +++ b/google/cloud/aiplatform/datasets/dataset.py @@ -187,11 +187,11 @@ def create( request_metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. labels (Dict[str, str]): - Optional. Labels with user-defined metadata to organize your Tensorboards. + Optional. Labels with user-defined metadata to organize your datasets. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. - No more than 64 user labels can be associated with one Tensorboard + No more than 64 user labels can be associated with one Dataset (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. 
System reserved label keys are prefixed with "aiplatform.googleapis.com/" diff --git a/google/cloud/aiplatform/datasets/time_series_dataset.py b/google/cloud/aiplatform/datasets/time_series_dataset.py index c3eb4e6363..7f55d455a5 100644 --- a/google/cloud/aiplatform/datasets/time_series_dataset.py +++ b/google/cloud/aiplatform/datasets/time_series_dataset.py @@ -78,11 +78,11 @@ def create( request_metadata (Sequence[Tuple[str, str]]): Strings which should be sent along with the request as metadata. labels (Dict[str, str]): - Optional. Labels with user-defined metadata to organize your Tensorboards. + Optional. Labels with user-defined metadata to organize your datasets. Label keys and values can be no longer than 64 characters (Unicode codepoints), can only contain lowercase letters, numeric characters, underscores and dashes. International characters are allowed. - No more than 64 user labels can be associated with one Tensorboard + No more than 64 user labels can be associated with one TimeSeriesDataset (System labels are excluded). See https://goo.gl/xmQnxf for more information and examples of labels. System reserved label keys are prefixed with "aiplatform.googleapis.com/" diff --git a/google/cloud/aiplatform/gapic_version.py b/google/cloud/aiplatform/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform/gapic_version.py +++ b/google/cloud/aiplatform/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/metadata/resource.py b/google/cloud/aiplatform/metadata/resource.py index 3e4d0ddc23..580bd6c420 100644 --- a/google/cloud/aiplatform/metadata/resource.py +++ b/google/cloud/aiplatform/metadata/resource.py @@ -297,6 +297,8 @@ def update( Custom credentials to use to update this resource. Overrides credentials set in aiplatform.init. """ + if not hasattr(self, "_threading_lock"): + self._threading_lock = threading.Lock() with self._threading_lock: gca_resource = deepcopy(self._gca_resource) diff --git a/google/cloud/aiplatform/preview/pipelinejob/pipeline_jobs.py b/google/cloud/aiplatform/preview/pipelinejob/pipeline_jobs.py index 2c3bba29fa..8e1a58da30 100644 --- a/google/cloud/aiplatform/preview/pipelinejob/pipeline_jobs.py +++ b/google/cloud/aiplatform/preview/pipelinejob/pipeline_jobs.py @@ -98,6 +98,7 @@ def create_schedule( max_run_count (int): Optional. Maximum run count of the schedule. If specified, The schedule will be completed when either started_run_count >= max_run_count or when end_time is reached. + Must be positive and <= 2^63-1. max_concurrent_run_count (int): Optional. Maximum number of runs that can be started concurrently for this PipelineJobSchedule. service_account (str): diff --git a/google/cloud/aiplatform/preview/pipelinejobschedule/pipeline_job_schedules.py b/google/cloud/aiplatform/preview/pipelinejobschedule/pipeline_job_schedules.py index 008d32ab84..b27621eaa1 100644 --- a/google/cloud/aiplatform/preview/pipelinejobschedule/pipeline_job_schedules.py +++ b/google/cloud/aiplatform/preview/pipelinejobschedule/pipeline_job_schedules.py @@ -145,6 +145,7 @@ def create( max_run_count (int): Optional. Maximum run count of the schedule. If specified, The schedule will be completed when either started_run_count >= max_run_count or when end_time is reached. + Must be positive and <= 2^63-1. 
max_concurrent_run_count (int): Optional. Maximum number of runs that can be started concurrently for this PipelineJobSchedule. service_account (str): @@ -204,6 +205,7 @@ def _create( max_run_count (int): Optional. Maximum run count of the schedule. If specified, The schedule will be completed when either started_run_count >= max_run_count or when end_time is reached. + Must be positive and <= 2^63-1. max_concurrent_run_count (int): Optional. Maximum number of runs that can be started concurrently for this PipelineJobSchedule. service_account (str): @@ -402,6 +404,7 @@ def update( max_run_count (int): Optional. Maximum run count of the schedule. If specified, The schedule will be completed when either started_run_count >= max_run_count or when end_time is reached. + Must be positive and <= 2^63-1. max_concurrent_run_count (int): Optional. Maximum number of runs that can be started concurrently for this PipelineJobSchedule. diff --git a/google/cloud/aiplatform/tensorboard/uploader_tracker.py b/google/cloud/aiplatform/tensorboard/uploader_tracker.py index daf0f6541d..b99d1028ad 100644 --- a/google/cloud/aiplatform/tensorboard/uploader_tracker.py +++ b/google/cloud/aiplatform/tensorboard/uploader_tracker.py @@ -111,7 +111,7 @@ def start_upload_tb_log( Args: tensorboard_experiment_name (str): Required. Name of this tensorboard experiment. Unique to the given - projects/{project}/locations/{location}/tensorboards/{tensorboard_id}. If it's already set by + projects/{project}/locations/{location}/tensorboards/{tensorboard_id}. logdir (str): Required. path of the log directory to upload tensorboard_id (str): Optional. TensorBoard ID. If not set, tensorboard_id in aiplatform.init will be used. project (str): Optional. Project the TensorBoard is in. If not set, project set in aiplatform.init will be used. diff --git a/google/cloud/aiplatform/training_jobs.py b/google/cloud/aiplatform/training_jobs.py index 0f10a52311..0cfb28c462 100644 --- a/google/cloud/aiplatform/training_jobs.py +++ b/google/cloud/aiplatform/training_jobs.py @@ -591,6 +591,7 @@ def _run_job( gcs_destination_uri_prefix: Optional[str] = None, bigquery_destination: Optional[str] = None, create_request_timeout: Optional[float] = None, + block: Optional[bool] = True, ) -> Optional[models.Model]: """Runs the training job. @@ -769,6 +770,8 @@ def _run_job( - AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test" create_request_timeout (float): Optional. The timeout for the create request in seconds. + block (bool): + Optional. If True, block until complete. """ input_data_config = self._create_input_data_config( @@ -824,7 +827,7 @@ def _run_job( _LOGGER.info("View Training:\n%s" % self._dashboard_uri()) - model = self._get_model() + model = self._get_model(block=block) if model is None: _LOGGER.warning( @@ -901,7 +904,7 @@ def _force_get_model(self, sync: bool = True) -> models.Model: return model - def _get_model(self) -> Optional[models.Model]: + def _get_model(self, block: bool = True) -> Optional[models.Model]: """Helper method to get and instantiate the Model to Upload. Returns: @@ -911,7 +914,8 @@ def _get_model(self) -> Optional[models.Model]: Raises: RuntimeError: If Training failed. 
""" - self._block_until_complete() + if block: + self._block_until_complete() if self.has_failed: raise RuntimeError( @@ -3264,10 +3268,8 @@ def run( create_request_timeout=create_request_timeout, ) - @base.optional_sync(construct_object_on_arg="managed_model") - def _run( + def submit( self, - python_packager: source_utils._TrainingScriptPythonPackager, dataset: Optional[ Union[ datasets.ImageDataset, @@ -3275,21 +3277,30 @@ def _run( datasets.TextDataset, datasets.VideoDataset, ] - ], - annotation_schema_uri: Optional[str], - worker_pool_specs: worker_spec_utils._DistributedTrainingSpec, - managed_model: Optional[gca_model.Model] = None, + ] = None, + annotation_schema_uri: Optional[str] = None, + model_display_name: Optional[str] = None, + model_labels: Optional[Dict[str, str]] = None, model_id: Optional[str] = None, parent_model: Optional[str] = None, is_default_version: Optional[bool] = True, model_version_aliases: Optional[Sequence[str]] = None, model_version_description: Optional[str] = None, - args: Optional[List[Union[str, float, int]]] = None, - environment_variables: Optional[Dict[str, str]] = None, base_output_dir: Optional[str] = None, service_account: Optional[str] = None, network: Optional[str] = None, bigquery_destination: Optional[str] = None, + args: Optional[List[Union[str, float, int]]] = None, + environment_variables: Optional[Dict[str, str]] = None, + replica_count: int = 1, + machine_type: str = "n1-standard-4", + accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED", + accelerator_count: int = 0, + boot_disk_type: str = "pd-ssd", + boot_disk_size_gb: int = 100, + reduction_server_replica_count: int = 0, + reduction_server_machine_type: Optional[str] = None, + reduction_server_container_uri: Optional[str] = None, training_fraction_split: Optional[float] = None, validation_fraction_split: Optional[float] = None, test_fraction_split: Optional[float] = None, @@ -3303,15 +3314,49 @@ def _run( enable_web_access: bool = False, enable_dashboard_access: bool = False, tensorboard: Optional[str] = None, - reduction_server_container_uri: Optional[str] = None, sync=True, create_request_timeout: Optional[float] = None, ) -> Optional[models.Model]: - """Packages local script and launches training_job. + """Submits the custom training job without blocking until completion. + + Distributed Training Support: + If replica count = 1 then one chief replica will be provisioned. If + replica_count > 1 the remainder will be provisioned as a worker replica pool. + ie: replica_count = 10 will result in 1 chief and 9 workers + All replicas have same machine_type, accelerator_type, and accelerator_count + + If training on a Vertex AI dataset, you can use one of the following split configurations: + Data fraction splits: + Any of ``training_fraction_split``, ``validation_fraction_split`` and + ``test_fraction_split`` may optionally be provided, they must sum to up to 1. If + the provided ones sum to less than 1, the remainder is assigned to sets as + decided by Vertex AI. If none of the fractions are set, by default roughly 80% + of data will be used for training, 10% for validation, and 10% for test. + + Data filter splits: + Assigns input data to training, validation, and test sets + based on the given filters, data pieces not matched by any + filter are ignored. Currently only supported for Datasets + containing DataItems. + If any of the filters in this message are to match nothing, then + they can be set as '-' (the minus sign). 
+ If using filter splits, all of ``training_filter_split``, ``validation_filter_split`` and + ``test_filter_split`` must be provided. + Supported only for unstructured Datasets. + + Predefined splits: + Assigns input data to training, validation, and test sets based on the value of a provided key. + If using predefined splits, ``predefined_split_column_name`` must be provided. + Supported only for tabular Datasets. + + Timestamp splits: + Assigns input data to training, validation, and test sets + based on a provided timestamps. The youngest data pieces are + assigned to training set, next to validation set, and the oldest + to the test set. + Supported only for tabular Datasets. Args: - python_packager (source_utils._TrainingScriptPythonPackager): - Required. Python Packager pointing to training script locally. dataset ( Union[ datasets.ImageDataset, @@ -3320,14 +3365,54 @@ def _run( datasets.VideoDataset, ] ): - Vertex AI to fit this training against. + Vertex AI to fit this training against. Custom training script should + retrieve datasets through passed in environment variables uris: + + os.environ["AIP_TRAINING_DATA_URI"] + os.environ["AIP_VALIDATION_DATA_URI"] + os.environ["AIP_TEST_DATA_URI"] + + Additionally the dataset format is passed in as: + + os.environ["AIP_DATA_FORMAT"] annotation_schema_uri (str): Google Cloud Storage URI points to a YAML file describing - annotation schema. - worker_pools_spec (worker_spec_utils._DistributedTrainingSpec): - Worker pools pecs required to run job. - managed_model (gca_model.Model): - Model proto if this script produces a Managed Model. + annotation schema. The schema is defined as an OpenAPI 3.0.2 + [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object) The schema files + that can be used here are found in + gs://google-cloud-aiplatform/schema/dataset/annotation/, + note that the chosen schema must be consistent with + ``metadata`` + of the Dataset specified by + ``dataset_id``. + + Only Annotations that both match this schema and belong to + DataItems not ignored by the split method are used in + respectively training, validation or test role, depending on + the role of the DataItem they are on. + + When used in conjunction with + ``annotations_filter``, + the Annotations used for training are filtered by both + ``annotations_filter`` + and + ``annotation_schema_uri``. + model_display_name (str): + If the script produces a managed Vertex AI Model. The display name of + the Model. The name can be up to 128 characters long and can be consist + of any UTF-8 characters. + + If not provided upon creation, the job's display_name is used. + model_labels (Dict[str, str]): + Optional. The labels with user-defined metadata to + organize your Models. + Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. model_id (str): Optional. The ID to use for the Model produced by this job, which will become the final component of the model resource name. @@ -3358,18 +3443,6 @@ def _run( The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9] model_version_description (str): Optional. The description of the model version being uploaded by this job. - args (List[Unions[str, int, float]]): - Command line arguments to be passed to the Python script. 
- environment_variables (Dict[str, str]): - Environment variables to be passed to the container. - Should be a dictionary where keys are environment variable names - and values are environment variable values for those names. - At most 10 environment variables can be specified. - The Name of the environment variable must be unique. - - environment_variables = { - 'MY_KEY': 'MY_VALUE' - } base_output_dir (str): GCS output directory of job. If not provided a timestamped directory in the staging directory will be used. @@ -3387,7 +3460,8 @@ def _run( The full name of the Compute Engine network to which the job should be peered. For example, projects/12345/global/networks/myVPC. Private services access must already be configured for the network. - If left unspecified, the job is not peered with any network. + If left unspecified, the network set in aiplatform.init will be used. + Otherwise, the job is not peered with any network. bigquery_destination (str): Provide this field if `dataset` is a BigQuery dataset. The BigQuery project location where the training data is to @@ -3403,6 +3477,44 @@ def _run( - AIP_TRAINING_DATA_URI ="bigquery_destination.dataset_*.training" - AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation" - AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test" + args (List[Unions[str, int, float]]): + Command line arguments to be passed to the Python script. + environment_variables (Dict[str, str]): + Environment variables to be passed to the container. + Should be a dictionary where keys are environment variable names + and values are environment variable values for those names. + At most 10 environment variables can be specified. + The Name of the environment variable must be unique. + + environment_variables = { + 'MY_KEY': 'MY_VALUE' + } + replica_count (int): + The number of worker replicas. If replica count = 1 then one chief + replica will be provisioned. If replica_count > 1 the remainder will be + provisioned as a worker replica pool. + machine_type (str): + The type of machine to use for training. + accelerator_type (str): + Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED, + NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, + NVIDIA_TESLA_T4 + accelerator_count (int): + The number of accelerators to attach to a worker replica. + boot_disk_type (str): + Type of the boot disk, default is `pd-ssd`. + Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or + `pd-standard` (Persistent Disk Hard Disk Drive). + boot_disk_size_gb (int): + Size in GB of the boot disk, default is 100GB. + boot disk size must be within the range of [100, 64000]. + reduction_server_replica_count (int): + The number of reduction server replicas, default is 0. + reduction_server_machine_type (str): + Optional. The type of machine to use for reduction server. + reduction_server_container_uri (str): + Optional. The Uri of the reduction server container image. + See details: https://cloud.google.com/vertex-ai/docs/training/distributed-training#reduce_training_time_with_reduction_server training_fraction_split (float): Optional. The fraction of the input data that is to be used to train the Model. This is ignored if Dataset is not provided. @@ -3481,73 +3593,54 @@ def _run( `service_account` is required with provided `tensorboard`. For more information on configuring your service account please visit: https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training - reduction_server_container_uri (str): - Optional. 
The Uri of the reduction server container image. + create_request_timeout (float): + Optional. The timeout for the create request in seconds. sync (bool): Whether to execute this method synchronously. If False, this method will be executed in concurrent Future and any downstream object will be immediately returned and synced when the Future has completed. - create_request_timeout (float) - Optional. The timeout for the create request in seconds Returns: model: The trained Vertex AI Model resource or None if training did not produce a Vertex AI Model. """ - package_gcs_uri = python_packager.package_and_copy_to_gcs( - gcs_staging_dir=self._staging_bucket, - project=self.project, - credentials=self.credentials, - ) - - for spec_order, spec in enumerate(worker_pool_specs): - - if not spec: - continue - - if ( - spec_order == worker_spec_utils._SPEC_ORDERS["server_spec"] - and reduction_server_container_uri - ): - spec["container_spec"] = { - "image_uri": reduction_server_container_uri, - } - else: - spec["python_package_spec"] = { - "executor_image_uri": self._container_uri, - "python_module": python_packager.module_name, - "package_uris": [package_gcs_uri], - } + network = network or initializer.global_config.network - if args: - spec["python_package_spec"]["args"] = args + worker_pool_specs, managed_model = self._prepare_and_validate_run( + model_display_name=model_display_name, + model_labels=model_labels, + replica_count=replica_count, + machine_type=machine_type, + accelerator_count=accelerator_count, + accelerator_type=accelerator_type, + boot_disk_type=boot_disk_type, + boot_disk_size_gb=boot_disk_size_gb, + reduction_server_replica_count=reduction_server_replica_count, + reduction_server_machine_type=reduction_server_machine_type, + ) - if environment_variables: - spec["python_package_spec"]["env"] = [ - {"name": key, "value": value} - for key, value in environment_variables.items() - ] + # make and copy package + python_packager = source_utils._TrainingScriptPythonPackager( + script_path=self._script_path, requirements=self._requirements + ) - ( - training_task_inputs, - base_output_dir, - ) = self._prepare_training_task_inputs_and_output_dir( + return self._run( + python_packager=python_packager, + dataset=dataset, + annotation_schema_uri=annotation_schema_uri, worker_pool_specs=worker_pool_specs, + managed_model=managed_model, + model_id=model_id, + parent_model=parent_model, + is_default_version=is_default_version, + model_version_aliases=model_version_aliases, + model_version_description=model_version_description, + args=args, + environment_variables=environment_variables, base_output_dir=base_output_dir, service_account=service_account, network=network, - timeout=timeout, - restart_job_on_worker_restart=restart_job_on_worker_restart, - enable_web_access=enable_web_access, - enable_dashboard_access=enable_dashboard_access, - tensorboard=tensorboard, - ) - - model = self._run_job( - training_task_definition=schema.training_job.definition.custom_task, - training_task_inputs=training_task_inputs, - dataset=dataset, - annotation_schema_uri=annotation_schema_uri, + bigquery_destination=bigquery_destination, training_fraction_split=training_fraction_split, validation_fraction_split=validation_fraction_split, test_fraction_split=test_fraction_split, @@ -3556,8 +3649,316 @@ def _run( test_filter_split=test_filter_split, predefined_split_column_name=predefined_split_column_name, timestamp_split_column_name=timestamp_split_column_name, - model=managed_model, - model_id=model_id, + 
timeout=timeout, + restart_job_on_worker_restart=restart_job_on_worker_restart, + enable_web_access=enable_web_access, + enable_dashboard_access=enable_dashboard_access, + tensorboard=tensorboard, + reduction_server_container_uri=reduction_server_container_uri + if reduction_server_replica_count > 0 + else None, + sync=sync, + create_request_timeout=create_request_timeout, + block=False, + ) + + @base.optional_sync(construct_object_on_arg="managed_model") + def _run( + self, + python_packager: source_utils._TrainingScriptPythonPackager, + dataset: Optional[ + Union[ + datasets.ImageDataset, + datasets.TabularDataset, + datasets.TextDataset, + datasets.VideoDataset, + ] + ], + annotation_schema_uri: Optional[str], + worker_pool_specs: worker_spec_utils._DistributedTrainingSpec, + managed_model: Optional[gca_model.Model] = None, + model_id: Optional[str] = None, + parent_model: Optional[str] = None, + is_default_version: Optional[bool] = True, + model_version_aliases: Optional[Sequence[str]] = None, + model_version_description: Optional[str] = None, + args: Optional[List[Union[str, float, int]]] = None, + environment_variables: Optional[Dict[str, str]] = None, + base_output_dir: Optional[str] = None, + service_account: Optional[str] = None, + network: Optional[str] = None, + bigquery_destination: Optional[str] = None, + training_fraction_split: Optional[float] = None, + validation_fraction_split: Optional[float] = None, + test_fraction_split: Optional[float] = None, + training_filter_split: Optional[str] = None, + validation_filter_split: Optional[str] = None, + test_filter_split: Optional[str] = None, + predefined_split_column_name: Optional[str] = None, + timestamp_split_column_name: Optional[str] = None, + timeout: Optional[int] = None, + restart_job_on_worker_restart: bool = False, + enable_web_access: bool = False, + enable_dashboard_access: bool = False, + tensorboard: Optional[str] = None, + reduction_server_container_uri: Optional[str] = None, + sync=True, + create_request_timeout: Optional[float] = None, + block: Optional[bool] = True, + ) -> Optional[models.Model]: + """Packages local script and launches training_job. + + Args: + python_packager (source_utils._TrainingScriptPythonPackager): + Required. Python Packager pointing to training script locally. + dataset ( + Union[ + datasets.ImageDataset, + datasets.TabularDataset, + datasets.TextDataset, + datasets.VideoDataset, + ] + ): + Vertex AI to fit this training against. + annotation_schema_uri (str): + Google Cloud Storage URI points to a YAML file describing + annotation schema. + worker_pools_spec (worker_spec_utils._DistributedTrainingSpec): + Worker pools pecs required to run job. + managed_model (gca_model.Model): + Model proto if this script produces a Managed Model. + model_id (str): + Optional. The ID to use for the Model produced by this job, + which will become the final component of the model resource name. + This value may be up to 63 characters, and valid characters + are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + parent_model (str): + Optional. The resource name or model ID of an existing model. + The new model uploaded by this job will be a version of `parent_model`. + + Only set this field when training a new version of an existing model. + is_default_version (bool): + Optional. When set to True, the newly uploaded model version will + automatically have alias "default" included. 
Subsequent uses of + the model produced by this job without a version specified will + use this "default" version. + + When set to False, the "default" alias will not be moved. + Actions targeting the model version produced by this job will need + to specifically reference this version by ID or alias. + + New model uploads, i.e. version 1, will always be "default" aliased. + model_version_aliases (Sequence[str]): + Optional. User provided version aliases so that the model version + uploaded by this job can be referenced via alias instead of + auto-generated version ID. A default version alias will be created + for the first version of the model. + + The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9] + model_version_description (str): + Optional. The description of the model version being uploaded by this job. + args (List[Unions[str, int, float]]): + Command line arguments to be passed to the Python script. + environment_variables (Dict[str, str]): + Environment variables to be passed to the container. + Should be a dictionary where keys are environment variable names + and values are environment variable values for those names. + At most 10 environment variables can be specified. + The Name of the environment variable must be unique. + + environment_variables = { + 'MY_KEY': 'MY_VALUE' + } + base_output_dir (str): + GCS output directory of job. If not provided a + timestamped directory in the staging directory will be used. + + Vertex AI sets the following environment variables when it runs your training code: + + - AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts, i.e. /model/ + - AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints, i.e. /checkpoints/ + - AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard logs, i.e. /logs/ + + service_account (str): + Specifies the service account for workload run-as account. + Users submitting jobs must have act-as permission on this run-as account. + network (str): + The full name of the Compute Engine network to which the job + should be peered. For example, projects/12345/global/networks/myVPC. + Private services access must already be configured for the network. + If left unspecified, the job is not peered with any network. + bigquery_destination (str): + Provide this field if `dataset` is a BigQuery dataset. + The BigQuery project location where the training data is to + be written to. In the given project a new dataset is created + with name + ``dataset___`` + where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All + training input data will be written into that dataset. In + the dataset three tables will be created, ``training``, + ``validation`` and ``test``. + + - AIP_DATA_FORMAT = "bigquery". + - AIP_TRAINING_DATA_URI ="bigquery_destination.dataset_*.training" + - AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation" + - AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test" + training_fraction_split (float): + Optional. The fraction of the input data that is to be used to train + the Model. This is ignored if Dataset is not provided. + validation_fraction_split (float): + Optional. The fraction of the input data that is to be used to validate + the Model. This is ignored if Dataset is not provided. + test_fraction_split (float): + Optional. The fraction of the input data that is to be used to evaluate + the Model. This is ignored if Dataset is not provided. + training_filter_split (str): + Optional. 
A filter on DataItems of the Dataset. DataItems that match + this filter are used to train the Model. A filter with same syntax + as the one used in DatasetService.ListDataItems may be used. If a + single DataItem is matched by more than one of the FilterSplit filters, + then it is assigned to the first set that applies to it in the training, + validation, test order. This is ignored if Dataset is not provided. + validation_filter_split (str): + Optional. A filter on DataItems of the Dataset. DataItems that match + this filter are used to validate the Model. A filter with same syntax + as the one used in DatasetService.ListDataItems may be used. If a + single DataItem is matched by more than one of the FilterSplit filters, + then it is assigned to the first set that applies to it in the training, + validation, test order. This is ignored if Dataset is not provided. + test_filter_split (str): + Optional. A filter on DataItems of the Dataset. DataItems that match + this filter are used to test the Model. A filter with same syntax + as the one used in DatasetService.ListDataItems may be used. If a + single DataItem is matched by more than one of the FilterSplit filters, + then it is assigned to the first set that applies to it in the training, + validation, test order. This is ignored if Dataset is not provided. + predefined_split_column_name (str): + Optional. The key is a name of one of the Dataset's data + columns. The value of the key (either the label's value or + value in the column) must be one of {``training``, + ``validation``, ``test``}, and it defines to which set the + given piece of data is assigned. If for a piece of data the + key is not present or has an invalid value, that piece is + ignored by the pipeline. + + Supported only for tabular and time series Datasets. + timestamp_split_column_name (str): + Optional. The key is a name of one of the Dataset's data + columns. The value of the key values of the key (the values in + the column) must be in RFC 3339 `date-time` format, where + `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a + piece of data the key is not present or has an invalid value, + that piece is ignored by the pipeline. + + Supported only for tabular and time series Datasets. + timeout (int): + The maximum job running time in seconds. The default is 7 days. + restart_job_on_worker_restart (bool): + Restarts the entire CustomJob if a worker + gets restarted. This feature can be used by + distributed training jobs that are not resilient + to workers leaving and joining a job. + enable_web_access (bool): + Whether you want Vertex AI to enable interactive shell access + to training containers. + https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell + enable_dashboard_access (bool): + Whether you want Vertex AI to enable access to the customized dashboard + to training containers. + tensorboard (str): + Optional. The name of a Vertex AI + [Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard] + resource to which this CustomJob will upload Tensorboard + logs. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + The training script should write Tensorboard to following Vertex AI environment + variable: + + AIP_TENSORBOARD_LOG_DIR + + `service_account` is required with provided `tensorboard`. + For more information on configuring your service account please visit: + https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training + reduction_server_container_uri (str): + Optional. 
The Uri of the reduction server container image. + sync (bool): + Whether to execute this method synchronously. If False, this method + will be executed in concurrent Future and any downstream object will + be immediately returned and synced when the Future has completed. + create_request_timeout (float) + Optional. The timeout for the create request in seconds + block (bool): + Optional. If True, block until complete. + + Returns: + model: The trained Vertex AI Model resource or None if training did not + produce a Vertex AI Model. + """ + package_gcs_uri = python_packager.package_and_copy_to_gcs( + gcs_staging_dir=self._staging_bucket, + project=self.project, + credentials=self.credentials, + ) + + for spec_order, spec in enumerate(worker_pool_specs): + + if not spec: + continue + + if ( + spec_order == worker_spec_utils._SPEC_ORDERS["server_spec"] + and reduction_server_container_uri + ): + spec["container_spec"] = { + "image_uri": reduction_server_container_uri, + } + else: + spec["python_package_spec"] = { + "executor_image_uri": self._container_uri, + "python_module": python_packager.module_name, + "package_uris": [package_gcs_uri], + } + + if args: + spec["python_package_spec"]["args"] = args + + if environment_variables: + spec["python_package_spec"]["env"] = [ + {"name": key, "value": value} + for key, value in environment_variables.items() + ] + + ( + training_task_inputs, + base_output_dir, + ) = self._prepare_training_task_inputs_and_output_dir( + worker_pool_specs=worker_pool_specs, + base_output_dir=base_output_dir, + service_account=service_account, + network=network, + timeout=timeout, + restart_job_on_worker_restart=restart_job_on_worker_restart, + enable_web_access=enable_web_access, + enable_dashboard_access=enable_dashboard_access, + tensorboard=tensorboard, + ) + + model = self._run_job( + training_task_definition=schema.training_job.definition.custom_task, + training_task_inputs=training_task_inputs, + dataset=dataset, + annotation_schema_uri=annotation_schema_uri, + training_fraction_split=training_fraction_split, + validation_fraction_split=validation_fraction_split, + test_fraction_split=test_fraction_split, + training_filter_split=training_filter_split, + validation_filter_split=validation_filter_split, + test_filter_split=test_filter_split, + predefined_split_column_name=predefined_split_column_name, + timestamp_split_column_name=timestamp_split_column_name, + model=managed_model, + model_id=model_id, parent_model=parent_model, is_default_version=is_default_version, model_version_aliases=model_version_aliases, @@ -3565,6 +3966,7 @@ def _run( gcs_destination_uri_prefix=base_output_dir, bigquery_destination=bigquery_destination, create_request_timeout=create_request_timeout, + block=block, ) return model @@ -3763,57 +4165,442 @@ def __init__( The key needs to be in the same region as where the compute resource is created. - If set, this TrainingPipeline will be secured by this key. + If set, this TrainingPipeline will be secured by this key. + + Note: Model trained by this TrainingPipeline is also secured + by this key if ``model_to_upload`` is not set separately. + + Overrides encryption_spec_key_name set in aiplatform.init. + model_encryption_spec_key_name (Optional[str]): + Optional. The Cloud KMS resource identifier of the customer + managed encryption key used to protect the model. Has the + form: + ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. + The key needs to be in the same region as where the compute + resource is created. 
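To make the python_package_spec / container_spec construction in the _run body shown above easier to follow, here is a rough sketch of the worker pool specs it ends up producing when a reduction server pool is configured (all images, bucket paths and machine types below are placeholders, not values taken from this change):

    # Index 0 is the chief; the pool at _SPEC_ORDERS["server_spec"] receives the
    # reduction server image instead of the packaged training code.
    worker_pool_specs = [
        {
            "machine_spec": {"machine_type": "n1-standard-4"},
            "replica_count": 1,
            "python_package_spec": {
                "executor_image_uri": "us-docker.pkg.dev/my-project/training/tf-cpu:latest",
                "python_module": "aiplatform_custom_trainer_script.task",
                "package_uris": ["gs://my-staging-bucket/trainer.tar.gz"],
                "args": ["--epochs", "10"],
                "env": [{"name": "MY_KEY", "value": "MY_VALUE"}],
            },
        },
        {
            "machine_spec": {"machine_type": "n1-highcpu-16"},
            "replica_count": 1,
            "container_spec": {"image_uri": "us-docker.pkg.dev/my-project/training/reductionserver:latest"},
        },
    ]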
+ + If set, the trained Model will be secured by this key. + + Overrides encryption_spec_key_name set in aiplatform.init. + staging_bucket (str): + Bucket used to stage source and training artifacts. Overrides + staging_bucket set in aiplatform.init. + """ + if not display_name: + display_name = self.__class__._generate_display_name() + super().__init__( + display_name=display_name, + project=project, + location=location, + credentials=credentials, + labels=labels, + training_encryption_spec_key_name=training_encryption_spec_key_name, + model_encryption_spec_key_name=model_encryption_spec_key_name, + container_uri=container_uri, + model_instance_schema_uri=model_instance_schema_uri, + model_parameters_schema_uri=model_parameters_schema_uri, + model_prediction_schema_uri=model_prediction_schema_uri, + model_serving_container_environment_variables=model_serving_container_environment_variables, + model_serving_container_ports=model_serving_container_ports, + model_serving_container_image_uri=model_serving_container_image_uri, + model_serving_container_command=model_serving_container_command, + model_serving_container_args=model_serving_container_args, + model_serving_container_predict_route=model_serving_container_predict_route, + model_serving_container_health_route=model_serving_container_health_route, + model_description=model_description, + explanation_metadata=explanation_metadata, + explanation_parameters=explanation_parameters, + staging_bucket=staging_bucket, + ) + + self._command = command + + def run( + self, + dataset: Optional[ + Union[ + datasets.ImageDataset, + datasets.TabularDataset, + datasets.TextDataset, + datasets.VideoDataset, + ] + ] = None, + annotation_schema_uri: Optional[str] = None, + model_display_name: Optional[str] = None, + model_labels: Optional[Dict[str, str]] = None, + model_id: Optional[str] = None, + parent_model: Optional[str] = None, + is_default_version: Optional[bool] = True, + model_version_aliases: Optional[Sequence[str]] = None, + model_version_description: Optional[str] = None, + base_output_dir: Optional[str] = None, + service_account: Optional[str] = None, + network: Optional[str] = None, + bigquery_destination: Optional[str] = None, + args: Optional[List[Union[str, float, int]]] = None, + environment_variables: Optional[Dict[str, str]] = None, + replica_count: int = 1, + machine_type: str = "n1-standard-4", + accelerator_type: str = "ACCELERATOR_TYPE_UNSPECIFIED", + accelerator_count: int = 0, + boot_disk_type: str = "pd-ssd", + boot_disk_size_gb: int = 100, + reduction_server_replica_count: int = 0, + reduction_server_machine_type: Optional[str] = None, + reduction_server_container_uri: Optional[str] = None, + training_fraction_split: Optional[float] = None, + validation_fraction_split: Optional[float] = None, + test_fraction_split: Optional[float] = None, + training_filter_split: Optional[str] = None, + validation_filter_split: Optional[str] = None, + test_filter_split: Optional[str] = None, + predefined_split_column_name: Optional[str] = None, + timestamp_split_column_name: Optional[str] = None, + timeout: Optional[int] = None, + restart_job_on_worker_restart: bool = False, + enable_web_access: bool = False, + enable_dashboard_access: bool = False, + tensorboard: Optional[str] = None, + sync=True, + create_request_timeout: Optional[float] = None, + ) -> Optional[models.Model]: + """Runs the custom training job. + + Distributed Training Support: + If replica count = 1 then one chief replica will be provisioned. 
If + replica_count > 1 the remainder will be provisioned as a worker replica pool. + ie: replica_count = 10 will result in 1 chief and 9 workers + All replicas have same machine_type, accelerator_type, and accelerator_count + + If training on a Vertex AI dataset, you can use one of the following split configurations: + Data fraction splits: + Any of ``training_fraction_split``, ``validation_fraction_split`` and + ``test_fraction_split`` may optionally be provided, they must sum to up to 1. If + the provided ones sum to less than 1, the remainder is assigned to sets as + decided by Vertex AI. If none of the fractions are set, by default roughly 80% + of data will be used for training, 10% for validation, and 10% for test. + + Data filter splits: + Assigns input data to training, validation, and test sets + based on the given filters, data pieces not matched by any + filter are ignored. Currently only supported for Datasets + containing DataItems. + If any of the filters in this message are to match nothing, then + they can be set as '-' (the minus sign). + If using filter splits, all of ``training_filter_split``, ``validation_filter_split`` and + ``test_filter_split`` must be provided. + Supported only for unstructured Datasets. + + Predefined splits: + Assigns input data to training, validation, and test sets based on the value of a provided key. + If using predefined splits, ``predefined_split_column_name`` must be provided. + Supported only for tabular Datasets. + + Timestamp splits: + Assigns input data to training, validation, and test sets + based on a provided timestamps. The youngest data pieces are + assigned to training set, next to validation set, and the oldest + to the test set. + Supported only for tabular Datasets. + + Args: + dataset (Union[datasets.ImageDataset,datasets.TabularDataset,datasets.TextDataset,datasets.VideoDataset]): + Vertex AI to fit this training against. Custom training script should + retrieve datasets through passed in environment variables uris: + + os.environ["AIP_TRAINING_DATA_URI"] + os.environ["AIP_VALIDATION_DATA_URI"] + os.environ["AIP_TEST_DATA_URI"] + + Additionally the dataset format is passed in as: + + os.environ["AIP_DATA_FORMAT"] + annotation_schema_uri (str): + Google Cloud Storage URI points to a YAML file describing + annotation schema. The schema is defined as an OpenAPI 3.0.2 + [Schema Object](https://github.com/OAI/OpenAPI-Specification/blob/main/versions/3.0.2.md#schema-object) The schema files + that can be used here are found in + gs://google-cloud-aiplatform/schema/dataset/annotation/, + note that the chosen schema must be consistent with + ``metadata`` + of the Dataset specified by + ``dataset_id``. + + Only Annotations that both match this schema and belong to + DataItems not ignored by the split method are used in + respectively training, validation or test role, depending on + the role of the DataItem they are on. + + When used in conjunction with + ``annotations_filter``, + the Annotations used for training are filtered by both + ``annotations_filter`` + and + ``annotation_schema_uri``. + model_display_name (str): + If the script produces a managed Vertex AI Model. The display name of + the Model. The name can be up to 128 characters long and can be consist + of any UTF-8 characters. + + If not provided upon creation, the job's display_name is used. + model_labels (Dict[str, str]): + Optional. The labels with user-defined metadata to + organize your Models. 
+ Label keys and values can be no longer than 64 + characters (Unicode codepoints), can only + contain lowercase letters, numeric characters, + underscores and dashes. International characters + are allowed. + See https://goo.gl/xmQnxf for more information + and examples of labels. + model_id (str): + Optional. The ID to use for the Model produced by this job, + which will become the final component of the model resource name. + This value may be up to 63 characters, and valid characters + are `[a-z0-9_-]`. The first character cannot be a number or hyphen. + parent_model (str): + Optional. The resource name or model ID of an existing model. + The new model uploaded by this job will be a version of `parent_model`. + + Only set this field when training a new version of an existing model. + is_default_version (bool): + Optional. When set to True, the newly uploaded model version will + automatically have alias "default" included. Subsequent uses of + the model produced by this job without a version specified will + use this "default" version. + + When set to False, the "default" alias will not be moved. + Actions targeting the model version produced by this job will need + to specifically reference this version by ID or alias. + + New model uploads, i.e. version 1, will always be "default" aliased. + model_version_aliases (Sequence[str]): + Optional. User provided version aliases so that the model version + uploaded by this job can be referenced via alias instead of + auto-generated version ID. A default version alias will be created + for the first version of the model. + + The format is [a-z][a-zA-Z0-9-]{0,126}[a-z0-9] + model_version_description (str): + Optional. The description of the model version being uploaded by this job. + base_output_dir (str): + GCS output directory of job. If not provided a + timestamped directory in the staging directory will be used. - Note: Model trained by this TrainingPipeline is also secured - by this key if ``model_to_upload`` is not set separately. + Vertex AI sets the following environment variables when it runs your training code: - Overrides encryption_spec_key_name set in aiplatform.init. - model_encryption_spec_key_name (Optional[str]): - Optional. The Cloud KMS resource identifier of the customer - managed encryption key used to protect the model. Has the - form: - ``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``. - The key needs to be in the same region as where the compute - resource is created. + - AIP_MODEL_DIR: a Cloud Storage URI of a directory intended for saving model artifacts, i.e. /model/ + - AIP_CHECKPOINT_DIR: a Cloud Storage URI of a directory intended for saving checkpoints, i.e. /checkpoints/ + - AIP_TENSORBOARD_LOG_DIR: a Cloud Storage URI of a directory intended for saving TensorBoard logs, i.e. /logs/ - If set, the trained Model will be secured by this key. + service_account (str): + Specifies the service account for workload run-as account. + Users submitting jobs must have act-as permission on this run-as account. + network (str): + The full name of the Compute Engine network to which the job + should be peered. For example, projects/12345/global/networks/myVPC. + Private services access must already be configured for the network. + If left unspecified, the network set in aiplatform.init will be used. + Otherwise, the job is not peered with any network. + bigquery_destination (str): + Provide this field if `dataset` is a BigQuery dataset. 
+ The BigQuery project location where the training data is to + be written to. In the given project a new dataset is created + with name + ``dataset___`` + where timestamp is in YYYY_MM_DDThh_mm_ss_sssZ format. All + training input data will be written into that dataset. In + the dataset three tables will be created, ``training``, + ``validation`` and ``test``. - Overrides encryption_spec_key_name set in aiplatform.init. - staging_bucket (str): - Bucket used to stage source and training artifacts. Overrides - staging_bucket set in aiplatform.init. + - AIP_DATA_FORMAT = "bigquery". + - AIP_TRAINING_DATA_URI ="bigquery_destination.dataset_*.training" + - AIP_VALIDATION_DATA_URI = "bigquery_destination.dataset_*.validation" + - AIP_TEST_DATA_URI = "bigquery_destination.dataset_*.test" + args (List[Unions[str, int, float]]): + Command line arguments to be passed to the Python script. + environment_variables (Dict[str, str]): + Environment variables to be passed to the container. + Should be a dictionary where keys are environment variable names + and values are environment variable values for those names. + At most 10 environment variables can be specified. + The Name of the environment variable must be unique. + + environment_variables = { + 'MY_KEY': 'MY_VALUE' + } + replica_count (int): + The number of worker replicas. If replica count = 1 then one chief + replica will be provisioned. If replica_count > 1 the remainder will be + provisioned as a worker replica pool. + machine_type (str): + The type of machine to use for training. + accelerator_type (str): + Hardware accelerator type. One of ACCELERATOR_TYPE_UNSPECIFIED, + NVIDIA_TESLA_K80, NVIDIA_TESLA_P100, NVIDIA_TESLA_V100, NVIDIA_TESLA_P4, + NVIDIA_TESLA_T4 + accelerator_count (int): + The number of accelerators to attach to a worker replica. + boot_disk_type (str): + Type of the boot disk, default is `pd-ssd`. + Valid values: `pd-ssd` (Persistent Disk Solid State Drive) or + `pd-standard` (Persistent Disk Hard Disk Drive). + boot_disk_size_gb (int): + Size in GB of the boot disk, default is 100GB. + boot disk size must be within the range of [100, 64000]. + reduction_server_replica_count (int): + The number of reduction server replicas, default is 0. + reduction_server_machine_type (str): + Optional. The type of machine to use for reduction server. + reduction_server_container_uri (str): + Optional. The Uri of the reduction server container image. + See details: https://cloud.google.com/vertex-ai/docs/training/distributed-training#reduce_training_time_with_reduction_server + training_fraction_split (float): + Optional. The fraction of the input data that is to be used to train + the Model. This is ignored if Dataset is not provided. + validation_fraction_split (float): + Optional. The fraction of the input data that is to be used to validate + the Model. This is ignored if Dataset is not provided. + test_fraction_split (float): + Optional. The fraction of the input data that is to be used to evaluate + the Model. This is ignored if Dataset is not provided. + training_filter_split (str): + Optional. A filter on DataItems of the Dataset. DataItems that match + this filter are used to train the Model. A filter with same syntax + as the one used in DatasetService.ListDataItems may be used. If a + single DataItem is matched by more than one of the FilterSplit filters, + then it is assigned to the first set that applies to it in the training, + validation, test order. This is ignored if Dataset is not provided. 
+ validation_filter_split (str): + Optional. A filter on DataItems of the Dataset. DataItems that match + this filter are used to validate the Model. A filter with same syntax + as the one used in DatasetService.ListDataItems may be used. If a + single DataItem is matched by more than one of the FilterSplit filters, + then it is assigned to the first set that applies to it in the training, + validation, test order. This is ignored if Dataset is not provided. + test_filter_split (str): + Optional. A filter on DataItems of the Dataset. DataItems that match + this filter are used to test the Model. A filter with same syntax + as the one used in DatasetService.ListDataItems may be used. If a + single DataItem is matched by more than one of the FilterSplit filters, + then it is assigned to the first set that applies to it in the training, + validation, test order. This is ignored if Dataset is not provided. + predefined_split_column_name (str): + Optional. The key is a name of one of the Dataset's data + columns. The value of the key (either the label's value or + value in the column) must be one of {``training``, + ``validation``, ``test``}, and it defines to which set the + given piece of data is assigned. If for a piece of data the + key is not present or has an invalid value, that piece is + ignored by the pipeline. + + Supported only for tabular and time series Datasets. + timestamp_split_column_name (str): + Optional. The key is a name of one of the Dataset's data + columns. The value of the key values of the key (the values in + the column) must be in RFC 3339 `date-time` format, where + `time-offset` = `"Z"` (e.g. 1985-04-12T23:20:50.52Z). If for a + piece of data the key is not present or has an invalid value, + that piece is ignored by the pipeline. + + Supported only for tabular and time series Datasets. + timeout (int): + The maximum job running time in seconds. The default is 7 days. + restart_job_on_worker_restart (bool): + Restarts the entire CustomJob if a worker + gets restarted. This feature can be used by + distributed training jobs that are not resilient + to workers leaving and joining a job. + enable_web_access (bool): + Whether you want Vertex AI to enable interactive shell access + to training containers. + https://cloud.google.com/vertex-ai/docs/training/monitor-debug-interactive-shell + enable_dashboard_access (bool): + Whether you want Vertex AI to enable access to the customized dashboard + to training containers. + tensorboard (str): + Optional. The name of a Vertex AI + [Tensorboard][google.cloud.aiplatform.v1beta1.Tensorboard] + resource to which this CustomJob will upload Tensorboard + logs. Format: + ``projects/{project}/locations/{location}/tensorboards/{tensorboard}`` + + The training script should write Tensorboard to following Vertex AI environment + variable: + + AIP_TENSORBOARD_LOG_DIR + + `service_account` is required with provided `tensorboard`. + For more information on configuring your service account please visit: + https://cloud.google.com/vertex-ai/docs/experiments/tensorboard-training + sync (bool): + Whether to execute this method synchronously. If False, this method + will be executed in concurrent Future and any downstream object will + be immediately returned and synced when the Future has completed. + create_request_timeout (float): + Optional. The timeout for the create request in seconds. + + Returns: + model: The trained Vertex AI Model resource or None if training did not + produce a Vertex AI Model. 
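Pulling the documented parameters together, a hedged usage sketch of this run() method on a container training job (project, bucket, images, dataset ID and split values are placeholders; the keyword names are the ones documented above):

    from google.cloud import aiplatform

    aiplatform.init(
        project="my-project",
        location="us-central1",
        staging_bucket="gs://my-staging-bucket",
    )

    job = aiplatform.CustomContainerTrainingJob(
        display_name="tabular-training",
        container_uri="gcr.io/my-project/my-trainer:latest",
        command=["python3", "-m", "trainer.task"],
        model_serving_container_image_uri="gcr.io/my-project/my-server:latest",
    )

    ds = aiplatform.TabularDataset(
        "projects/my-project/locations/us-central1/datasets/1234567890"
    )

    model = job.run(
        dataset=ds,
        model_display_name="tabular-model",
        replica_count=2,                      # 1 chief + 1 worker
        machine_type="n1-standard-8",
        accelerator_type="NVIDIA_TESLA_T4",
        accelerator_count=1,
        training_fraction_split=0.8,
        validation_fraction_split=0.1,
        test_fraction_split=0.1,
    )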
+ + Raises: + RuntimeError: If Training job has already been run, staging_bucket has not + been set, or model_display_name was provided but required arguments + were not provided in constructor. """ - if not display_name: - display_name = self.__class__._generate_display_name() - super().__init__( - display_name=display_name, - project=project, - location=location, - credentials=credentials, - labels=labels, - training_encryption_spec_key_name=training_encryption_spec_key_name, - model_encryption_spec_key_name=model_encryption_spec_key_name, - container_uri=container_uri, - model_instance_schema_uri=model_instance_schema_uri, - model_parameters_schema_uri=model_parameters_schema_uri, - model_prediction_schema_uri=model_prediction_schema_uri, - model_serving_container_environment_variables=model_serving_container_environment_variables, - model_serving_container_ports=model_serving_container_ports, - model_serving_container_image_uri=model_serving_container_image_uri, - model_serving_container_command=model_serving_container_command, - model_serving_container_args=model_serving_container_args, - model_serving_container_predict_route=model_serving_container_predict_route, - model_serving_container_health_route=model_serving_container_health_route, - model_description=model_description, - explanation_metadata=explanation_metadata, - explanation_parameters=explanation_parameters, - staging_bucket=staging_bucket, + network = network or initializer.global_config.network + + worker_pool_specs, managed_model = self._prepare_and_validate_run( + model_display_name=model_display_name, + model_labels=model_labels, + replica_count=replica_count, + machine_type=machine_type, + accelerator_count=accelerator_count, + accelerator_type=accelerator_type, + boot_disk_type=boot_disk_type, + boot_disk_size_gb=boot_disk_size_gb, + reduction_server_replica_count=reduction_server_replica_count, + reduction_server_machine_type=reduction_server_machine_type, ) - self._command = command + return self._run( + dataset=dataset, + annotation_schema_uri=annotation_schema_uri, + worker_pool_specs=worker_pool_specs, + managed_model=managed_model, + model_id=model_id, + parent_model=parent_model, + is_default_version=is_default_version, + model_version_aliases=model_version_aliases, + model_version_description=model_version_description, + args=args, + environment_variables=environment_variables, + base_output_dir=base_output_dir, + service_account=service_account, + network=network, + bigquery_destination=bigquery_destination, + training_fraction_split=training_fraction_split, + validation_fraction_split=validation_fraction_split, + test_fraction_split=test_fraction_split, + training_filter_split=training_filter_split, + validation_filter_split=validation_filter_split, + test_filter_split=test_filter_split, + predefined_split_column_name=predefined_split_column_name, + timestamp_split_column_name=timestamp_split_column_name, + timeout=timeout, + restart_job_on_worker_restart=restart_job_on_worker_restart, + enable_web_access=enable_web_access, + enable_dashboard_access=enable_dashboard_access, + tensorboard=tensorboard, + reduction_server_container_uri=reduction_server_container_uri + if reduction_server_replica_count > 0 + else None, + sync=sync, + create_request_timeout=create_request_timeout, + ) - def run( + def submit( self, dataset: Optional[ Union[ @@ -3862,7 +4649,7 @@ def run( sync=True, create_request_timeout: Optional[float] = None, ) -> Optional[models.Model]: - """Runs the custom training job. 
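The hunk that follows turns this second copy of run() into a submit() variant that forwards block=False, so the call kicks off the TrainingPipeline without waiting for it to finish. A hedged sketch of the intended calling pattern (all identifiers below are placeholders):

    from google.cloud import aiplatform

    aiplatform.init(
        project="my-project",
        location="us-central1",
        staging_bucket="gs://my-staging-bucket",
    )

    job = aiplatform.CustomContainerTrainingJob(
        display_name="tabular-training",
        container_uri="gcr.io/my-project/my-trainer:latest",
        command=["python3", "-m", "trainer.task"],
    )

    # Unlike run(), submit() returns without blocking on pipeline completion.
    model = job.submit(
        replica_count=1,
        machine_type="n1-standard-4",
    )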
+ """Submits the custom training job without blocking until completion. Distributed Training Support: If replica count = 1 then one chief replica will be provisioned. If @@ -4196,6 +4983,7 @@ def run( else None, sync=sync, create_request_timeout=create_request_timeout, + block=False, ) @base.optional_sync(construct_object_on_arg="managed_model") @@ -4239,6 +5027,7 @@ def _run( reduction_server_container_uri: Optional[str] = None, sync=True, create_request_timeout: Optional[float] = None, + block: Optional[bool] = True, ) -> Optional[models.Model]: """Packages local script and launches training_job. Args: @@ -4418,6 +5207,8 @@ def _run( be immediately returned and synced when the Future has completed. create_request_timeout (float): Optional. The timeout for the create request in seconds. + block (bool): + Optional. If True, block until complete. Returns: model: The trained Vertex AI Model resource or None if training did not @@ -4488,6 +5279,7 @@ def _run( gcs_destination_uri_prefix=base_output_dir, bigquery_destination=bigquery_destination, create_request_timeout=create_request_timeout, + block=block, ) return model diff --git a/google/cloud/aiplatform/utils/gcs_utils.py b/google/cloud/aiplatform/utils/gcs_utils.py index a8c86430ba..486fddd152 100644 --- a/google/cloud/aiplatform/utils/gcs_utils.py +++ b/google/cloud/aiplatform/utils/gcs_utils.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2022 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -300,6 +300,53 @@ def download_file_from_gcs( source_blob.download_to_filename(filename=destination_file_path) +def download_from_gcs( + source_uri: str, + destination_path: str, + project: Optional[str] = None, + credentials: Optional[auth_credentials.Credentials] = None, +): + """Downloads GCS files to local path. + + Args: + source_uri (str): + Required. GCS URI(or prefix) of the file(s) to download. + destination_path (str): + Required. local path where the data should be downloaded. + If provided a file path, then `source_uri` must refer to a file. + If provided a directory path, then `source_uri` must refer to a prefix. + project (str): + Optional. Google Cloud Project that contains the staging bucket. + credentials (auth_credentials.Credentials): + Optional. The custom credentials to use when making API calls. + If not provided, default credentials will be used. + + Raises: + GoogleCloudError: When the download process fails. + """ + project = project or initializer.global_config.project + credentials = credentials or initializer.global_config.credentials + + storage_client = storage.Client(project=project, credentials=credentials) + + validate_gcs_path(source_uri) + bucket_name, prefix = source_uri.replace("gs://", "").split("/", maxsplit=1) + + blobs = storage_client.list_blobs(bucket_or_name=bucket_name, prefix=prefix) + for blob in blobs: + # In SDK 2.0 remote training, we'll create some empty files. + # These files ends with '/', and we'll skip them. + if not blob.name.endswith("/"): + rel_path = os.path.relpath(blob.name, prefix) + filename = ( + destination_path + if rel_path == "." 
+ else os.path.join(destination_path, rel_path) + ) + os.makedirs(os.path.dirname(filename), exist_ok=True) + blob.download_to_filename(filename=filename) + + def _upload_pandas_df_to_gcs( df: "pandas.DataFrame", upload_gcs_path: str, file_format: str = "jsonl" ) -> None: diff --git a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/instance_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/params_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
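A short usage sketch for the download_from_gcs helper added to gcs_utils.py above (this is an internal SDK utility; the bucket, prefix and local path are placeholders):

    from google.cloud.aiplatform.utils import gcs_utils

    # Downloads every blob under the prefix into ./artifacts/, recreating the
    # relative directory layout; empty "directory marker" blobs are skipped.
    gcs_utils.download_from_gcs(
        source_uri="gs://my-bucket/remote-training/output",
        destination_path="./artifacts",
    )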
# -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/predict/prediction_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py +++ b/google/cloud/aiplatform/v1/schema/trainingjob/definition_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. 
# -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/params_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform/version.py b/google/cloud/aiplatform/version.py index 068748077e..b8b9fff801 100644 --- a/google/cloud/aiplatform/version.py +++ b/google/cloud/aiplatform/version.py @@ -15,4 +15,4 @@ # limitations under the License. 
# -__version__ = "1.26.1" +__version__ = "1.27.0" diff --git a/google/cloud/aiplatform/vizier/pyvizier/proto_converters.py b/google/cloud/aiplatform/vizier/pyvizier/proto_converters.py index 86ce9df770..49862182c6 100644 --- a/google/cloud/aiplatform/vizier/pyvizier/proto_converters.py +++ b/google/cloud/aiplatform/vizier/pyvizier/proto_converters.py @@ -1,9 +1,11 @@ """Converters for OSS Vizier's protos from/to PyVizier's classes.""" -import datetime import logging +from datetime import timezone from typing import List, Optional, Sequence, Tuple, Union from google.protobuf import duration_pb2 +from google.protobuf import struct_pb2 +from google.protobuf import timestamp_pb2 from google.cloud.aiplatform.compat.types import study as study_pb2 from google.cloud.aiplatform.vizier.pyvizier import ScaleType from google.cloud.aiplatform.vizier.pyvizier import ParameterType @@ -80,8 +82,8 @@ def _set_default_value( default_value: Union[float, int, str], ): """Sets the protos' default_value field.""" - which_pv_spec = proto.WhichOneof("parameter_value_spec") - getattr(proto, which_pv_spec).default_value.value = default_value + which_pv_spec = proto._pb.WhichOneof("parameter_value_spec") + getattr(proto, which_pv_spec).default_value = default_value @classmethod def _matching_parent_values( @@ -280,17 +282,16 @@ def to_proto( cls, parameter_value: ParameterValue, name: str ) -> study_pb2.Trial.Parameter: """Returns Parameter Proto.""" - proto = study_pb2.Trial.Parameter(parameter_id=name) - if isinstance(parameter_value.value, int): - proto.value.number_value = parameter_value.value + value = struct_pb2.Value(number_value=parameter_value.value) elif isinstance(parameter_value.value, bool): - proto.value.bool_value = parameter_value.value + value = struct_pb2.Value(bool_value=parameter_value.value) elif isinstance(parameter_value.value, float): - proto.value.number_value = parameter_value.value + value = struct_pb2.Value(number_value=parameter_value.value) elif isinstance(parameter_value.value, str): - proto.value.string_value = parameter_value.value + value = struct_pb2.Value(string_value=parameter_value.value) + proto = study_pb2.Trial.Parameter(parameter_id=name, value=value) return proto @@ -340,18 +341,19 @@ def from_proto(cls, proto: study_pb2.Measurement) -> Measurement: @classmethod def to_proto(cls, measurement: Measurement) -> study_pb2.Measurement: """Converts to Measurement proto.""" - proto = study_pb2.Measurement() + int_seconds = int(measurement.elapsed_secs) + proto = study_pb2.Measurement( + elapsed_duration=duration_pb2.Duration( + seconds=int_seconds, + nanos=int(1e9 * (measurement.elapsed_secs - int_seconds)), + ) + ) for name, metric in measurement.metrics.items(): proto.metrics.append( study_pb2.Measurement.Metric(metric_id=name, value=metric.value) ) proto.step_count = measurement.steps - int_seconds = int(measurement.elapsed_secs) - proto.elapsed_duration = duration_pb2.Duration( - seconds=int_seconds, - nanos=int(1e9 * (measurement.elapsed_secs - int_seconds)), - ) return proto @@ -426,8 +428,11 @@ def from_proto(cls, proto: study_pb2.Trial) -> Trial: infeasibility_reason = None if proto.state == study_pb2.Trial.State.SUCCEEDED: if proto.end_time: - completion_ts = proto.end_time.nanosecond / 1e9 - completion_time = datetime.datetime.fromtimestamp(completion_ts) + completion_time = ( + proto.end_time.timestamp_pb() + .ToDatetime() + .replace(tzinfo=timezone.utc) + ) elif proto.state == study_pb2.Trial.State.INFEASIBLE: infeasibility_reason = proto.infeasible_reason @@ -437,8 
+442,11 @@ def from_proto(cls, proto: study_pb2.Trial) -> Trial: creation_time = None if proto.start_time: - creation_ts = proto.start_time.nanosecond / 1e9 - creation_time = datetime.datetime.fromtimestamp(creation_ts) + creation_time = ( + proto.start_time.timestamp_pb() + .ToDatetime() + .replace(tzinfo=timezone.utc) + ) return Trial( id=int(proto.name.split("/")[-1]), description=proto.name, @@ -481,22 +489,26 @@ def to_proto(cls, pytrial: Trial) -> study_pb2.Trial: # pytrial always adds an empty metric. Ideally, we should remove it if the # metric does not exist in the study config. + # setattr() is required here as `proto.final_measurement.CopyFrom` + # raises AttributeErrors when setting the field on the pb2 compat types. if pytrial.final_measurement is not None: - proto.final_measurement.CopyFrom( - MeasurementConverter.to_proto(pytrial.final_measurement) + setattr( + proto, + "final_measurement", + MeasurementConverter.to_proto(pytrial.final_measurement), ) for measurement in pytrial.measurements: proto.measurements.append(MeasurementConverter.to_proto(measurement)) if pytrial.creation_time is not None: - creation_secs = datetime.datetime.timestamp(pytrial.creation_time) - proto.start_time.seconds = int(creation_secs) - proto.start_time.nanos = int(1e9 * (creation_secs - int(creation_secs))) + start_time = timestamp_pb2.Timestamp() + start_time.FromDatetime(pytrial.creation_time) + setattr(proto, "start_time", start_time) if pytrial.completion_time is not None: - completion_secs = datetime.datetime.timestamp(pytrial.completion_time) - proto.end_time.seconds = int(completion_secs) - proto.end_time.nanos = int(1e9 * (completion_secs - int(completion_secs))) + end_time = timestamp_pb2.Timestamp() + end_time.FromDatetime(pytrial.completion_time) + setattr(proto, "end_time", end_time) if pytrial.infeasibility_reason is not None: proto.infeasible_reason = pytrial.infeasibility_reason return proto diff --git a/google/cloud/aiplatform_v1/gapic_version.py b/google/cloud/aiplatform_v1/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform_v1/gapic_version.py +++ b/google/cloud/aiplatform_v1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/google/cloud/aiplatform_v1beta1/gapic_version.py b/google/cloud/aiplatform_v1beta1/gapic_version.py index 1b2aef3edb..26ab66b2e5 100644 --- a/google/cloud/aiplatform_v1beta1/gapic_version.py +++ b/google/cloud/aiplatform_v1beta1/gapic_version.py @@ -13,4 +13,4 @@ # See the License for the specific language governing permissions and # limitations under the License. # -__version__ = "1.26.1" # {x-release-please-version} +__version__ = "1.27.0" # {x-release-please-version} diff --git a/noxfile.py b/noxfile.py index 2e7154fd4e..babd58a573 100644 --- a/noxfile.py +++ b/noxfile.py @@ -191,9 +191,9 @@ def unit(session): def install_systemtest_dependencies(session, *constraints): # Use pre-release gRPC for system tests. - # Exclude version 1.49.0rc1 which has a known issue. - # See https://github.com/grpc/grpc/pull/30642 - session.install("--pre", "grpcio!=1.49.0rc1") + # Exclude version 1.52.0rc1 which has a known issue. 
+ # See https://github.com/grpc/grpc/issues/32163 + session.install("--pre", "grpcio!=1.52.0rc1") session.install(*SYSTEM_TEST_STANDARD_DEPENDENCIES, *constraints) @@ -275,7 +275,7 @@ def cover(session): session.run("coverage", "erase") -@nox.session(python=DEFAULT_PYTHON_VERSION) +@nox.session(python="3.9") def docs(session): """Build the docs for this library.""" @@ -301,17 +301,16 @@ def docs(session): ) -@nox.session(python=DEFAULT_PYTHON_VERSION) +@nox.session(python="3.9") def docfx(session): """Build the docfx yaml files for this library.""" session.install("-e", ".") session.install( - "sphinx==4.0.1", + "gcp-sphinx-docfx-yaml", "alabaster", "google-cloud-aiplatform[prediction]", "recommonmark", - "gcp-sphinx-docfx-yaml", ) shutil.rmtree(os.path.join("docs", "_build"), ignore_errors=True) @@ -349,9 +348,7 @@ def prerelease_deps(session): unit_deps_all = UNIT_TEST_STANDARD_DEPENDENCIES + UNIT_TEST_EXTERNAL_DEPENDENCIES session.install(*unit_deps_all) system_deps_all = ( - SYSTEM_TEST_STANDARD_DEPENDENCIES - + SYSTEM_TEST_EXTERNAL_DEPENDENCIES - + SYSTEM_TEST_EXTRAS + SYSTEM_TEST_STANDARD_DEPENDENCIES + SYSTEM_TEST_EXTERNAL_DEPENDENCIES ) session.install(*system_deps_all) @@ -381,8 +378,8 @@ def prerelease_deps(session): # dependency of grpc "six", "googleapis-common-protos", - # Exclude version 1.49.0rc1 which has a known issue. See https://github.com/grpc/grpc/pull/30642 - "grpcio!=1.49.0rc1", + # Exclude version 1.52.0rc1 which has a known issue. See https://github.com/grpc/grpc/issues/32163 + "grpcio!=1.52.0rc1", "grpcio-status", "google-api-core", "proto-plus", diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json index d49d67157b..a768913d9d 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.26.1" + "version": "1.27.0" }, "snippets": [ { diff --git a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json index ee391ba153..2bb2c635e1 100644 --- a/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json +++ b/samples/generated_samples/snippet_metadata_google.cloud.aiplatform.v1beta1.json @@ -8,7 +8,7 @@ ], "language": "PYTHON", "name": "google-cloud-aiplatform", - "version": "1.26.1" + "version": "1.27.0" }, "snippets": [ { diff --git a/samples/model-builder/experiment_tracking/autologging_with_auto_run_creation_sample.py b/samples/model-builder/experiment_tracking/autologging_with_auto_run_creation_sample.py index 88400ec240..e4f1c896af 100644 --- a/samples/model-builder/experiment_tracking/autologging_with_auto_run_creation_sample.py +++ b/samples/model-builder/experiment_tracking/autologging_with_auto_run_creation_sample.py @@ -25,7 +25,7 @@ def autologging_with_auto_run_creation_sample( location: str, ): aiplatform.init( - experiment_name=experiment_name, + experiment=experiment_name, project=project, location=location, experiment_tensorboard=experiment_tensorboard, diff --git a/samples/model-builder/experiment_tracking/autologging_with_auto_run_creation_sample_test.py b/samples/model-builder/experiment_tracking/autologging_with_auto_run_creation_sample_test.py index 7880a8fdf0..bc3e50d413 100644 --- 
a/samples/model-builder/experiment_tracking/autologging_with_auto_run_creation_sample_test.py +++ b/samples/model-builder/experiment_tracking/autologging_with_auto_run_creation_sample_test.py @@ -27,7 +27,7 @@ def test_autologging_with_auto_run_creation_sample(mock_sdk_init, mock_autolog): ) mock_sdk_init.assert_called_with( - experiment_name=constants.EXPERIMENT_NAME, + experiment=constants.EXPERIMENT_NAME, project=constants.PROJECT, location=constants.LOCATION, experiment_tensorboard=constants.TENSORBOARD_NAME, diff --git a/samples/model-builder/experiment_tracking/autologging_with_manual_run_creation_sample.py b/samples/model-builder/experiment_tracking/autologging_with_manual_run_creation_sample.py index 817a0c2a22..c41b1eb285 100644 --- a/samples/model-builder/experiment_tracking/autologging_with_manual_run_creation_sample.py +++ b/samples/model-builder/experiment_tracking/autologging_with_manual_run_creation_sample.py @@ -26,7 +26,7 @@ def autologging_with_manual_run_creation_sample( location: str, ): aiplatform.init( - experiment_name=experiment_name, + experiment=experiment_name, project=project, location=location, experiment_tensorboard=experiment_tensorboard, diff --git a/samples/model-builder/experiment_tracking/autologging_with_manual_run_creation_sample_test.py b/samples/model-builder/experiment_tracking/autologging_with_manual_run_creation_sample_test.py index 75765cc04d..e0a772a834 100644 --- a/samples/model-builder/experiment_tracking/autologging_with_manual_run_creation_sample_test.py +++ b/samples/model-builder/experiment_tracking/autologging_with_manual_run_creation_sample_test.py @@ -30,7 +30,7 @@ def test_autologging_with_manual_run_creation_sample( ) mock_sdk_init.assert_called_with( - experiment_name=constants.EXPERIMENT_NAME, + experiment=constants.EXPERIMENT_NAME, project=constants.PROJECT, location=constants.LOCATION, experiment_tensorboard=constants.TENSORBOARD_NAME, diff --git a/samples/model-builder/experiment_tracking/create_experiment_run_sample.py b/samples/model-builder/experiment_tracking/create_experiment_run_sample.py index 0767139eba..6e554d3b8b 100644 --- a/samples/model-builder/experiment_tracking/create_experiment_run_sample.py +++ b/samples/model-builder/experiment_tracking/create_experiment_run_sample.py @@ -25,7 +25,7 @@ def create_experiment_run_sample( project: str, location: str, ): - aiplatform.init(experiment_name=experiment_name, project=project, location=location) + aiplatform.init(experiment=experiment_name, project=project, location=location) aiplatform.start_run(run=run_name, tensorboard=experiment_run_tensorboard) diff --git a/samples/model-builder/experiment_tracking/create_experiment_run_sample_test.py b/samples/model-builder/experiment_tracking/create_experiment_run_sample_test.py index 1c91daecb7..93c938727f 100644 --- a/samples/model-builder/experiment_tracking/create_experiment_run_sample_test.py +++ b/samples/model-builder/experiment_tracking/create_experiment_run_sample_test.py @@ -27,7 +27,7 @@ def test_create_experiment_run_sample(mock_sdk_init, mock_start_run): ) mock_sdk_init.assert_called_with( - experiment_name=constants.EXPERIMENT_NAME, + experiment=constants.EXPERIMENT_NAME, project=constants.PROJECT, location=constants.LOCATION, ) diff --git a/samples/model-builder/experiment_tracking/end_experiment_run_sample.py b/samples/model-builder/experiment_tracking/end_experiment_run_sample.py index 5161c15937..3a4b3f9959 100644 --- a/samples/model-builder/experiment_tracking/end_experiment_run_sample.py +++ 
b/samples/model-builder/experiment_tracking/end_experiment_run_sample.py @@ -23,7 +23,7 @@ def end_experiment_run_sample( project: str, location: str, ): - aiplatform.init(experiment_name=experiment_name, project=project, location=location) + aiplatform.init(experiment=experiment_name, project=project, location=location) aiplatform.start_run(run=run_name, resume=True) diff --git a/samples/model-builder/experiment_tracking/end_experiment_run_sample_test.py b/samples/model-builder/experiment_tracking/end_experiment_run_sample_test.py index a28a18f1a3..72fe94c123 100644 --- a/samples/model-builder/experiment_tracking/end_experiment_run_sample_test.py +++ b/samples/model-builder/experiment_tracking/end_experiment_run_sample_test.py @@ -26,7 +26,7 @@ def test_end_experiment_run_sample(mock_sdk_init, mock_start_run, mock_end_run): ) mock_sdk_init.assert_called_with( - experiment_name=constants.EXPERIMENT_NAME, + experiment=constants.EXPERIMENT_NAME, project=constants.PROJECT, location=constants.LOCATION, ) diff --git a/samples/model-builder/experiment_tracking/log_time_series_metrics_sample.py b/samples/model-builder/experiment_tracking/log_time_series_metrics_sample.py index 2ab8d46d2f..70b80440fa 100644 --- a/samples/model-builder/experiment_tracking/log_time_series_metrics_sample.py +++ b/samples/model-builder/experiment_tracking/log_time_series_metrics_sample.py @@ -28,7 +28,7 @@ def log_time_series_metrics_sample( project: str, location: str, ): - aiplatform.init(experiment_name=experiment_name, project=project, location=location) + aiplatform.init(experiment=experiment_name, project=project, location=location) aiplatform.start_run(run_name=run_name, resume=True) diff --git a/samples/model-builder/experiment_tracking/resume_experiment_run_sample.py b/samples/model-builder/experiment_tracking/resume_experiment_run_sample.py index 6ba254f07f..3e2c0d779b 100644 --- a/samples/model-builder/experiment_tracking/resume_experiment_run_sample.py +++ b/samples/model-builder/experiment_tracking/resume_experiment_run_sample.py @@ -23,7 +23,7 @@ def resume_experiment_run_sample( project: str, location: str, ): - aiplatform.init(experiment_name=experiment_name, project=project, location=location) + aiplatform.init(experiment=experiment_name, project=project, location=location) aiplatform.start_run(run=run_name, resume=True) diff --git a/samples/model-builder/experiment_tracking/resume_experiment_run_sample_test.py b/samples/model-builder/experiment_tracking/resume_experiment_run_sample_test.py index 4cb5aa24fe..456cd5be78 100644 --- a/samples/model-builder/experiment_tracking/resume_experiment_run_sample_test.py +++ b/samples/model-builder/experiment_tracking/resume_experiment_run_sample_test.py @@ -26,7 +26,7 @@ def test_resume_experiment_run_sample(mock_sdk_init, mock_start_run): ) mock_sdk_init.assert_called_with( - experiment_name=constants.EXPERIMENT_NAME, + experiment=constants.EXPERIMENT_NAME, project=constants.PROJECT, location=constants.LOCATION, ) diff --git a/samples/model-builder/noxfile.py b/samples/model-builder/noxfile.py index 0398d72ff6..7c8a63994c 100644 --- a/samples/model-builder/noxfile.py +++ b/samples/model-builder/noxfile.py @@ -18,7 +18,7 @@ import os from pathlib import Path import sys -from typing import Callable, Dict, List, Optional +from typing import Callable, Dict, Optional import nox @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. 
-ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] @@ -109,22 +109,6 @@ def get_pytest_env_vars() -> Dict[str, str]: # -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - # Linting with flake8. # # We ignore the following rules: @@ -139,7 +123,6 @@ def _determine_local_import_names(start_dir: str) -> List[str]: "--show-source", "--builtin=gettext", "--max-complexity=20", - "--import-order-style=google", "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", "--max-line-length=88", @@ -149,14 +132,11 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") + session.install("flake8") else: - session.install("flake8", "flake8-import-order", "flake8-annotations") + session.install("flake8", "flake8-annotations") - local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), ".", ] session.run("flake8", *args) diff --git a/samples/snippets/noxfile.py b/samples/snippets/noxfile.py index 0398d72ff6..7c8a63994c 100644 --- a/samples/snippets/noxfile.py +++ b/samples/snippets/noxfile.py @@ -18,7 +18,7 @@ import os from pathlib import Path import sys -from typing import Callable, Dict, List, Optional +from typing import Callable, Dict, Optional import nox @@ -89,7 +89,7 @@ def get_pytest_env_vars() -> Dict[str, str]: # DO NOT EDIT - automatically generated. # All versions used to test samples. -ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10"] +ALL_VERSIONS = ["3.7", "3.8", "3.9", "3.10", "3.11"] # Any default versions that should be ignored. IGNORED_VERSIONS = TEST_CONFIG["ignored_versions"] @@ -109,22 +109,6 @@ def get_pytest_env_vars() -> Dict[str, str]: # -def _determine_local_import_names(start_dir: str) -> List[str]: - """Determines all import names that should be considered "local". - - This is used when running the linter to insure that import order is - properly checked. - """ - file_ext_pairs = [os.path.splitext(path) for path in os.listdir(start_dir)] - return [ - basename - for basename, extension in file_ext_pairs - if extension == ".py" - or os.path.isdir(os.path.join(start_dir, basename)) - and basename not in ("__pycache__") - ] - - # Linting with flake8. 
# # We ignore the following rules: @@ -139,7 +123,6 @@ def _determine_local_import_names(start_dir: str) -> List[str]: "--show-source", "--builtin=gettext", "--max-complexity=20", - "--import-order-style=google", "--exclude=.nox,.cache,env,lib,generated_pb2,*_pb2.py,*_pb2_grpc.py", "--ignore=E121,E123,E126,E203,E226,E24,E266,E501,E704,W503,W504,I202", "--max-line-length=88", @@ -149,14 +132,11 @@ def _determine_local_import_names(start_dir: str) -> List[str]: @nox.session def lint(session: nox.sessions.Session) -> None: if not TEST_CONFIG["enforce_type_hints"]: - session.install("flake8", "flake8-import-order") + session.install("flake8") else: - session.install("flake8", "flake8-import-order", "flake8-annotations") + session.install("flake8", "flake8-annotations") - local_names = _determine_local_import_names(".") args = FLAKE8_COMMON_ARGS + [ - "--application-import-names", - ",".join(local_names), ".", ] session.run("flake8", *args) diff --git a/tests/system/aiplatform/test_language_models.py b/tests/system/aiplatform/test_language_models.py index 54fa57d299..b905d3b075 100644 --- a/tests/system/aiplatform/test_language_models.py +++ b/tests/system/aiplatform/test_language_models.py @@ -124,6 +124,7 @@ def test_tuning(self, shared_state): train_steps=1, tuning_job_location="europe-west4", tuned_model_location="us-central1", + learning_rate=2.0, ) # According to the Pipelines design, external resources created by a pipeline # must not be modified or deleted. Otherwise caching will break next pipeline runs. diff --git a/tests/unit/aiplatform/test_language_models.py b/tests/unit/aiplatform/test_language_models.py index 0b38d8713e..5c478a637c 100644 --- a/tests/unit/aiplatform/test_language_models.py +++ b/tests/unit/aiplatform/test_language_models.py @@ -101,7 +101,7 @@ "name": "publishers/google/models/codechat-bison", "version_id": "001", "open_source_category": "PROPRIETARY", - "launch_stage": gca_publisher_model.PublisherModel.LaunchStage.PUBLIC_PREVIEW, + "launch_stage": gca_publisher_model.PublisherModel.LaunchStage.GA, "publisher_model_template": "projects/{user-project}/locations/{location}/publishers/google/models/codechat-bison@001", "predict_schemata": { "instance_schema_uri": "gs://google-cloud-aiplatform/schema/predict/instance/codechat_generation_1.0.0.yaml", @@ -114,7 +114,7 @@ "name": "publishers/google/models/code-bison", "version_id": "001", "open_source_category": "PROPRIETARY", - "launch_stage": gca_publisher_model.PublisherModel.LaunchStage.PUBLIC_PREVIEW, + "launch_stage": gca_publisher_model.PublisherModel.LaunchStage.GA, "publisher_model_template": "projects/{user-project}/locations/{location}/publishers/google/models/code-bison@001", "predict_schemata": { "instance_schema_uri": "gs://google-cloud-aiplatform/schema/predict/instance/code_generation_1.0.0.yaml", @@ -127,7 +127,7 @@ "name": "publishers/google/models/code-gecko", "version_id": "001", "open_source_category": "PROPRIETARY", - "launch_stage": gca_publisher_model.PublisherModel.LaunchStage.PUBLIC_PREVIEW, + "launch_stage": gca_publisher_model.PublisherModel.LaunchStage.GA, "publisher_model_template": "projects/{user-project}/locations/{location}/publishers/google/models/code-gecko@001", "predict_schemata": { "instance_schema_uri": "gs://google-cloud-aiplatform/schema/predict/instance/code_generation_1.0.0.yaml", @@ -164,11 +164,13 @@ } _TEST_CHAT_GENERATION_PREDICTION1 = { - "safetyAttributes": { - "scores": [], - "blocked": False, - "categories": [], - }, + "safetyAttributes": [ + { + "scores": [], + 
"blocked": False, + "categories": [], + } + ], "candidates": [ { "author": "1", @@ -177,11 +179,13 @@ ], } _TEST_CHAT_GENERATION_PREDICTION2 = { - "safetyAttributes": { - "scores": [], - "blocked": False, - "categories": [], - }, + "safetyAttributes": [ + { + "scores": [], + "blocked": False, + "categories": [], + } + ], "candidates": [ { "author": "1", @@ -661,8 +665,13 @@ def test_tune_model( training_data=_TEST_TEXT_BISON_TRAINING_DF, tuning_job_location="europe-west4", tuned_model_location="us-central1", + learning_rate=0.1, ) call_kwargs = mock_pipeline_service_create.call_args[1] + pipeline_arguments = call_kwargs[ + "pipeline_job" + ].runtime_config.parameter_values + assert pipeline_arguments["learning_rate"] == 0.1 assert ( call_kwargs["pipeline_job"].encryption_spec.kms_key_name == _TEST_ENCRYPTION_KEY_NAME @@ -747,6 +756,16 @@ def test_chat(self): output_text="Ned likes watching movies.", ), ], + message_history=[ + preview_language_models.ChatMessage( + author=preview_language_models.ChatSession.USER_AUTHOR, + content="Question 1?", + ), + preview_language_models.ChatMessage( + author=preview_language_models.ChatSession.MODEL_AUTHOR, + content="Answer 1.", + ), + ], temperature=0.0, ) @@ -764,11 +783,11 @@ def test_chat(self): ] response = chat.send_message(message_text1) assert response.text == expected_response1 - assert len(chat.message_history) == 2 - assert chat.message_history[0].author == chat.USER_AUTHOR - assert chat.message_history[0].content == message_text1 - assert chat.message_history[1].author == chat.MODEL_AUTHOR - assert chat.message_history[1].content == expected_response1 + assert len(chat.message_history) == 4 + assert chat.message_history[2].author == chat.USER_AUTHOR + assert chat.message_history[2].content == message_text1 + assert chat.message_history[3].author == chat.MODEL_AUTHOR + assert chat.message_history[3].content == expected_response1 gca_predict_response2 = gca_prediction_service.PredictResponse() gca_predict_response2.predictions.append(_TEST_CHAT_GENERATION_PREDICTION2) @@ -784,11 +803,11 @@ def test_chat(self): ] response = chat.send_message(message_text2, temperature=0.1) assert response.text == expected_response2 - assert len(chat.message_history) == 4 - assert chat.message_history[2].author == chat.USER_AUTHOR - assert chat.message_history[2].content == message_text2 - assert chat.message_history[3].author == chat.MODEL_AUTHOR - assert chat.message_history[3].content == expected_response2 + assert len(chat.message_history) == 6 + assert chat.message_history[4].author == chat.USER_AUTHOR + assert chat.message_history[4].content == message_text2 + assert chat.message_history[5].author == chat.MODEL_AUTHOR + assert chat.message_history[5].content == expected_response2 # Validating the parameters chat_temperature = 0.1 @@ -848,7 +867,7 @@ def test_code_chat(self): _CODECHAT_BISON_PUBLISHER_MODEL_DICT ), ) as mock_get_publisher_model: - model = preview_language_models.CodeChatModel.from_pretrained( + model = language_models.CodeChatModel.from_pretrained( "google/codechat-bison@001" ) @@ -944,7 +963,7 @@ def test_code_generation(self): _CODE_GENERATION_BISON_PUBLISHER_MODEL_DICT ), ) as mock_get_publisher_model: - model = preview_language_models.CodeGenerationModel.from_pretrained( + model = language_models.CodeGenerationModel.from_pretrained( "google/code-bison@001" ) @@ -971,11 +990,9 @@ def test_code_generation(self): # Validating the parameters predict_temperature = 0.1 predict_max_output_tokens = 100 - default_temperature = ( - 
preview_language_models.CodeGenerationModel._DEFAULT_TEMPERATURE - ) + default_temperature = language_models.CodeGenerationModel._DEFAULT_TEMPERATURE default_max_output_tokens = ( - preview_language_models.CodeGenerationModel._DEFAULT_MAX_OUTPUT_TOKENS + language_models.CodeGenerationModel._DEFAULT_MAX_OUTPUT_TOKENS ) with mock.patch.object( @@ -1012,7 +1029,7 @@ def test_code_completion(self): _CODE_COMPLETION_BISON_PUBLISHER_MODEL_DICT ), ) as mock_get_publisher_model: - model = preview_language_models.CodeGenerationModel.from_pretrained( + model = language_models.CodeGenerationModel.from_pretrained( "google/code-gecko@001" ) @@ -1039,11 +1056,9 @@ def test_code_completion(self): # Validating the parameters predict_temperature = 0.1 predict_max_output_tokens = 100 - default_temperature = ( - preview_language_models.CodeGenerationModel._DEFAULT_TEMPERATURE - ) + default_temperature = language_models.CodeGenerationModel._DEFAULT_TEMPERATURE default_max_output_tokens = ( - preview_language_models.CodeGenerationModel._DEFAULT_MAX_OUTPUT_TOKENS + language_models.CodeGenerationModel._DEFAULT_MAX_OUTPUT_TOKENS ) with mock.patch.object( diff --git a/tests/unit/aiplatform/test_metadata.py b/tests/unit/aiplatform/test_metadata.py index 545a4c016f..c0b64685fd 100644 --- a/tests/unit/aiplatform/test_metadata.py +++ b/tests/unit/aiplatform/test_metadata.py @@ -556,6 +556,23 @@ def get_artifact_mock(): yield get_artifact_mock +@pytest.fixture +def get_artifact_mock_with_metadata(): + with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock: + get_artifact_mock.return_value = GapicArtifact( + name=_TEST_ARTIFACT_NAME, + display_name=_TEST_ARTIFACT_ID, + schema_title=constants.SYSTEM_METRICS, + schema_version=constants.SCHEMA_VERSIONS[constants.SYSTEM_METRICS], + metadata={ + google.cloud.aiplatform.metadata.constants._VERTEX_EXPERIMENT_TRACKING_LABEL: True, + constants.GCP_ARTIFACT_RESOURCE_NAME_KEY: test_constants.TensorboardConstants._TEST_TENSORBOARD_RUN_NAME, + constants._STATE_KEY: gca_execution.Execution.State.RUNNING, + }, + ) + yield get_artifact_mock + + @pytest.fixture def get_artifact_not_found_mock(): with patch.object(MetadataServiceClient, "get_artifact") as get_artifact_mock: @@ -2026,6 +2043,27 @@ def test_experiment_run_get_logged_custom_jobs(self, get_custom_job_mock): retry=base._DEFAULT_RETRY, ) + @pytest.mark.usefixtures( + "get_metadata_store_mock", + "get_experiment_mock", + "get_experiment_run_mock", + "get_context_mock", + "list_contexts_mock", + "list_executions_mock", + "get_artifact_mock_with_metadata", + "update_context_mock", + ) + def test_update_experiment_run_after_list( + self, + ): + aiplatform.init( + project=_TEST_PROJECT, + location=_TEST_LOCATION, + ) + + experiment_run_list = aiplatform.ExperimentRun.list(experiment=_TEST_EXPERIMENT) + experiment_run_list[0].update_state(gca_execution.Execution.State.FAILED) + class TestTensorboard: def test_get_or_create_default_tb_with_existing_default( diff --git a/tests/unit/aiplatform/test_utils.py b/tests/unit/aiplatform/test_utils.py index 86d490ab94..70374475c5 100644 --- a/tests/unit/aiplatform/test_utils.py +++ b/tests/unit/aiplatform/test_utils.py @@ -1,6 +1,6 @@ # -*- coding: utf-8 -*- -# Copyright 2020 Google LLC +# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
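
The chat unit tests above now seed a session with prior turns and then check that message_history grows by one request/response pair per call. A minimal sketch of that pattern, assuming the preview ChatModel/ChatSession API exercised by these tests (the model id and message contents are illustrative):

from vertexai.preview.language_models import ChatMessage, ChatModel, ChatSession

chat_model = ChatModel.from_pretrained("chat-bison@001")  # illustrative model id
chat = chat_model.start_chat(
    # Seed the session with an earlier exchange; send_message() appends after these.
    message_history=[
        ChatMessage(author=ChatSession.USER_AUTHOR, content="Question 1?"),
        ChatMessage(author=ChatSession.MODEL_AUTHOR, content="Answer 1."),
    ],
    temperature=0.0,
)
response = chat.send_message("Question 2?")  # illustrative message text
# Two seeded turns plus the new request/response pair.
assert len(chat.message_history) == 4
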
@@ -21,6 +21,7 @@ import json import os import re +import tempfile import textwrap from typing import Callable, Dict, Optional, Tuple from unittest import mock @@ -97,6 +98,36 @@ def mock_storage_blob_upload_from_filename(): yield mock_blob_upload_from_filename +@pytest.fixture +def mock_storage_client_list_blobs(): + with patch("google.cloud.storage.Client.list_blobs") as mock_list_blobs: + mock_list_blobs.return_value = [ + storage.Blob(name=f"{GCS_PREFIX}/", bucket=GCS_BUCKET), + storage.Blob(name=f"{GCS_PREFIX}/{FAKE_FILENAME}-1", bucket=GCS_BUCKET), + storage.Blob( + name=f"{GCS_PREFIX}/fake-dir/{FAKE_FILENAME}-2", bucket=GCS_BUCKET + ), + ] + yield mock_list_blobs + + +@pytest.fixture +def mock_storage_client_list_blob(): + with patch("google.cloud.storage.Client.list_blobs") as mock_list_blobs: + mock_list_blobs.return_value = [ + storage.Blob(name=f"{GCS_PREFIX}/{FAKE_FILENAME}", bucket=GCS_BUCKET), + ] + yield mock_list_blobs + + +@pytest.fixture +def mock_storage_blob_download_to_filename(): + with patch( + "google.cloud.storage.Blob.download_to_filename" + ) as mock_blob_download_to_filename: + yield mock_blob_download_to_filename + + @pytest.fixture() def mock_bucket_not_exist(): with patch("google.cloud.storage.Blob.from_string") as mock_bucket_not_exist, patch( @@ -570,6 +601,60 @@ def test_create_gcs_bucket_for_pipeline_artifacts_if_it_does_not_exist( output == "gs://test-project-vertex-pipelines-us-central1/output_artifacts/" ) + def test_download_from_gcs_dir( + self, mock_storage_client_list_blobs, mock_storage_blob_download_to_filename + ): + with tempfile.TemporaryDirectory() as temp_dir: + source_uri = f"gs://{GCS_BUCKET}/{GCS_PREFIX}" + destination_path = f"{temp_dir}/test-dir" + + gcs_utils.download_from_gcs(source_uri, destination_path) + + mock_storage_client_list_blobs.assert_called_once_with( + bucket_or_name=GCS_BUCKET, + prefix=GCS_PREFIX, + ) + + assert mock_storage_blob_download_to_filename.call_count == 2 + mock_storage_blob_download_to_filename.assert_any_call( + filename=f"{destination_path}/{FAKE_FILENAME}-1" + ) + mock_storage_blob_download_to_filename.assert_any_call( + filename=f"{destination_path}/fake-dir/{FAKE_FILENAME}-2" + ) + + def test_download_from_gcs_file( + self, mock_storage_client_list_blob, mock_storage_blob_download_to_filename + ): + with tempfile.TemporaryDirectory() as temp_dir: + source_uri = f"gs://{GCS_BUCKET}/{GCS_PREFIX}/{FAKE_FILENAME}" + destination_path = f"{temp_dir}/test-file" + + gcs_utils.download_from_gcs(source_uri, destination_path) + + mock_storage_client_list_blob.assert_called_once_with( + bucket_or_name=GCS_BUCKET, + prefix=f"{GCS_PREFIX}/{FAKE_FILENAME}", + ) + + mock_storage_blob_download_to_filename.assert_called_once_with( + filename=destination_path + ) + + def test_download_from_gcs_invalid_source_uri(self): + with tempfile.TemporaryDirectory() as temp_dir: + source_uri = f"{GCS_BUCKET}/{GCS_PREFIX}" + destination_path = f"{temp_dir}/test-dir" + + with pytest.raises( + ValueError, + match=( + f"Invalid GCS path {source_uri}. 
" + "Please provide a valid GCS path starting with 'gs://'" + ), + ): + gcs_utils.download_from_gcs(source_uri, destination_path) + def test_validate_gcs_path(self): test_valid_path = "gs://test_valid_path" gcs_utils.validate_gcs_path(test_valid_path) diff --git a/tests/unit/aiplatform/test_vizier.py b/tests/unit/aiplatform/test_vizier.py index 9b47761368..27bd751a42 100644 --- a/tests/unit/aiplatform/test_vizier.py +++ b/tests/unit/aiplatform/test_vizier.py @@ -14,29 +14,31 @@ # See the License for the specific language governing permissions and # limitations under the License. -import pytest - - -from unittest import mock from importlib import reload -from unittest.mock import patch +from unittest import mock from unittest.mock import ANY +from unittest.mock import patch +import attr from google.api_core import exceptions from google.api_core import operation - from google.cloud import aiplatform -from google.cloud.aiplatform.vizier import Study -from google.cloud.aiplatform.vizier import Trial from google.cloud.aiplatform import initializer -from google.cloud.aiplatform.vizier import pyvizier - from google.cloud.aiplatform.compat.services import vizier_service_client +from google.cloud.aiplatform.compat.types import study as study_pb2 +from google.cloud.aiplatform.compat.types import study as gca_study from google.cloud.aiplatform.compat.types import ( - study as gca_study, vizier_service as gca_vizier_service, ) +from google.cloud.aiplatform.vizier import pyvizier +from google.cloud.aiplatform.vizier import Study +from google.cloud.aiplatform.vizier import Trial +from google.cloud.aiplatform.vizier.pyvizier import proto_converters +import pytest + from google.protobuf import duration_pb2 +from google.protobuf import struct_pb2 +from google.protobuf import timestamp_pb2 # project @@ -619,3 +621,402 @@ def test_materialize(self): materialize_trial.parameters.get_value(_TEST_PARAMETER_ID_1) == _TEST_PARAMETER_ID_MIN_VALUE_1 ) + + +class TestMeasurementConverter: + def test_measurement_proto_with_empty_named_metric(self): + proto = study_pb2.Measurement() + proto.metrics.append(study_pb2.Measurement.Metric(metric_id="", value=0.8)) + + measurement = proto_converters.MeasurementConverter.from_proto(proto) + assert measurement.metrics[""] == pyvizier.Metric(value=0.8) + + def test_measurement_creation(self): + measurement = pyvizier.Measurement( + metrics={ + "": pyvizier.Metric(value=0), + # The empty metric always exists in Measurement. 
+ "pr-auc:": pyvizier.Metric(value=0.8), + "latency": pyvizier.Metric(value=32), + }, + elapsed_secs=12, + steps=12, + ) + proto = proto_converters.MeasurementConverter.to_proto(measurement) + assert attr.asdict( + proto_converters.MeasurementConverter.from_proto(proto) + ) == attr.asdict(measurement) + + +class TestParameterValueConverter: + def test_to_double_proto(self): + value = pyvizier.ParameterValue(True) + assert proto_converters.ParameterValueConverter.to_proto( + value, "aa" + ) == study_pb2.Trial.Parameter( + parameter_id="aa", value=struct_pb2.Value(number_value=1.0) + ) + + def test_to_discrete_proto(self): + value = pyvizier.ParameterValue(True) + assert proto_converters.ParameterValueConverter.to_proto( + value, "aa" + ) == study_pb2.Trial.Parameter( + parameter_id="aa", value=struct_pb2.Value(number_value=1.0) + ) + + def testto_string_proto(self): + value = pyvizier.ParameterValue("category") + assert proto_converters.ParameterValueConverter.to_proto( + value, "aa" + ) == study_pb2.Trial.Parameter( + parameter_id="aa", value=struct_pb2.Value(string_value="category") + ) + + def test_to_integer_proto(self): + value = pyvizier.ParameterValue(True) + assert proto_converters.ParameterValueConverter.to_proto( + value, "aa" + ) == study_pb2.Trial.Parameter( + parameter_id="aa", value=struct_pb2.Value(number_value=1.0) + ) + + +class TestTrialConverter: + def test_from_proto_completed(self): + proto = study_pb2.Trial(name=str(1)) + proto.state = study_pb2.Trial.State.SUCCEEDED + proto.parameters.append( + study_pb2.Trial.Parameter( + parameter_id="float", value=struct_pb2.Value(number_value=1.0) + ) + ) + proto.parameters.append( + study_pb2.Trial.Parameter( + parameter_id="int", value=struct_pb2.Value(number_value=2) + ) + ) + proto.parameters.append( + study_pb2.Trial.Parameter( + parameter_id="str", value=struct_pb2.Value(string_value="3") + ) + ) + proto.final_measurement.metrics.append( + study_pb2.Measurement.Metric(metric_id="pr-auc", value=0.8) + ) + proto.final_measurement.metrics.append( + study_pb2.Measurement.Metric(metric_id="latency", value=32) + ) + + creation_secs = 1586649600 + start_time = timestamp_pb2.Timestamp( + seconds=int(creation_secs), + nanos=int(1e9 * (creation_secs - int(creation_secs))), + ) + setattr(proto, "start_time", start_time) + + completion_secs = 1586649600 + 10 + end_time = timestamp_pb2.Timestamp( + seconds=int(completion_secs), + nanos=int(1e9 * (completion_secs - int(completion_secs))), + ) + setattr(proto, "end_time", end_time) + + proto.measurements.append( + study_pb2.Measurement( + step_count=10, elapsed_duration=duration_pb2.Duration(seconds=15) + ) + ) + proto.measurements[-1].metrics.append( + study_pb2.Measurement.Metric(metric_id="pr-auc", value=0.7) + ) + proto.measurements[-1].metrics.append( + study_pb2.Measurement.Metric(metric_id="latency", value=42) + ) + + proto.measurements.append( + study_pb2.Measurement( + step_count=20, elapsed_duration=duration_pb2.Duration(seconds=30) + ) + ) + proto.measurements[-1].metrics.append( + study_pb2.Measurement.Metric(metric_id="pr-auc", value=0.75) + ) + proto.measurements[-1].metrics.append( + study_pb2.Measurement.Metric(metric_id="latency", value=37) + ) + + test = proto_converters.TrialConverter.from_proto(proto=proto) + assert test.id == 1 + assert test.status == pyvizier.TrialStatus.COMPLETED + assert test.is_completed + assert not test.infeasible + assert test.infeasibility_reason is None + assert len(test.parameters) == 3 + assert test.parameters["float"].value == 1.0 + assert 
test.parameters["int"].value == 2 + assert test.parameters["str"].value == "3" + + # Final measurement + assert len(test.final_measurement.metrics) == 2 + assert test.final_measurement.metrics["pr-auc"].value == 0.8 + assert test.final_measurement.metrics["latency"].value == 32 + + # Intermediate measurement + assert test.measurements[0] == pyvizier.Measurement( + metrics={"pr-auc": 0.7, "latency": 42}, steps=10, elapsed_secs=15 + ) + + assert test.measurements[1] == pyvizier.Measurement( + metrics={"pr-auc": 0.75, "latency": 37}, steps=20, elapsed_secs=30 + ) + + assert test.id == 1 + + assert test.creation_time is not None + assert test.creation_time.timestamp() == start_time.seconds + assert test.completion_time is not None + assert test.completion_time.timestamp() == end_time.seconds + assert test.duration.total_seconds() == 10 + + assert not test.infeasible + + def test_from_proto_pending(self): + proto = study_pb2.Trial(name=str(2)) + proto.state = study_pb2.Trial.State.ACTIVE + + start_time = timestamp_pb2.Timestamp(seconds=int(1586649600)) + setattr(proto, "start_time", start_time) + + test = proto_converters.TrialConverter.from_proto(proto=proto) + assert test.status == pyvizier.TrialStatus.ACTIVE + assert not test.is_completed + assert not test.infeasible + assert test.infeasibility_reason is None + assert test.creation_time is not None + assert test.completion_time is None + assert test.duration is None + + def test_from_proto_infeasible(self): + proto = study_pb2.Trial(name=str(1)) + proto.state = study_pb2.Trial.State.INFEASIBLE + proto.parameters.append( + study_pb2.Trial.Parameter( + parameter_id="float", value=struct_pb2.Value(number_value=1.0) + ) + ) + proto.parameters.append( + study_pb2.Trial.Parameter( + parameter_id="int", value=struct_pb2.Value(number_value=2) + ) + ) + proto.parameters.append( + study_pb2.Trial.Parameter( + parameter_id="str", value=struct_pb2.Value(string_value="3") + ) + ) + + start_time = timestamp_pb2.Timestamp(seconds=int(1586649600)) + setattr(proto, "start_time", start_time) + end_time = timestamp_pb2.Timestamp(seconds=int(1586649600 + 10)) + setattr(proto, "end_time", end_time) + setattr(proto, "infeasible_reason", "A reason") + + test = proto_converters.TrialConverter.from_proto(proto=proto) + assert test.status == pyvizier.TrialStatus.COMPLETED + assert test.is_completed + assert test.infeasible + assert test.infeasibility_reason == "A reason" + + def test_from_proto_invalid_trial(self): + proto = study_pb2.Trial(name=str(2)) + proto.parameters.append( + study_pb2.Trial.Parameter( + parameter_id="float", value=struct_pb2.Value(number_value=1.0) + ) + ) + proto.parameters.append( + study_pb2.Trial.Parameter( + parameter_id="float", value=struct_pb2.Value(number_value=2.0) + ) + ) + proto.state = study_pb2.Trial.State.ACTIVE + start_time = timestamp_pb2.Timestamp(seconds=int(1586649600)) + setattr(proto, "start_time", start_time) + try: + proto_converters.TrialConverter.from_proto(proto=proto) + except ValueError as e: + assert "Invalid trial proto" in str(e) + + +class TestTrialConverterToProto: + def _get_single_objective_base_trial(self): + proto = study_pb2.Trial( + name="owners/my_username/studies/2", id="2", client_id="worker0" + ) + + proto.parameters.append( + study_pb2.Trial.Parameter( + parameter_id="activation", value=struct_pb2.Value(string_value="relu") + ) + ) + proto.parameters.append( + study_pb2.Trial.Parameter( + parameter_id="synchronus", value=struct_pb2.Value(string_value="true") + ) + ) + proto.parameters.append( + 
study_pb2.Trial.Parameter( + parameter_id="batch_size", value=struct_pb2.Value(number_value=32) + ) + ) + proto.parameters.append( + study_pb2.Trial.Parameter( + parameter_id="floating_point_param", + value=struct_pb2.Value(number_value=32.0), + ) + ) + proto.parameters.append( + study_pb2.Trial.Parameter( + parameter_id="learning_rate", value=struct_pb2.Value(number_value=0.5) + ) + ) + proto.parameters.append( + study_pb2.Trial.Parameter( + parameter_id="units", value=struct_pb2.Value(number_value=50) + ) + ) + creation_secs = 1630505100 + start_time = timestamp_pb2.Timestamp( + seconds=int(creation_secs), + nanos=int(1e9 * (creation_secs - int(creation_secs))), + ) + setattr(proto, "start_time", start_time) + return proto + + def test_parameter_back_to_back_conversion(self): + proto = self._get_single_objective_base_trial() + proto.state = study_pb2.Trial.State.ACTIVE + pytrial = proto_converters.TrialConverter.from_proto(proto) + got = proto_converters.TrialConverter.to_proto(pytrial) + assert proto == got + + def test_final_measurement_back_to_back_conversion(self): + proto = study_pb2.Trial( + name=str(1), + id=str(1), + state=study_pb2.Trial.State.SUCCEEDED, + final_measurement=gca_study.Measurement( + step_count=101, elapsed_duration=duration_pb2.Duration(seconds=67) + ), + ) + creation_secs = 12456 + start_time = timestamp_pb2.Timestamp( + seconds=int(creation_secs), + nanos=int(1e9 * (creation_secs - int(creation_secs))), + ) + setattr(proto, "start_time", start_time) + + completion_secs = 12456 + 10 + end_time = timestamp_pb2.Timestamp( + seconds=int(completion_secs), + nanos=int(1e9 * (completion_secs - int(completion_secs))), + ) + setattr(proto, "end_time", end_time) + proto.parameters.append( + study_pb2.Trial.Parameter( + parameter_id="learning_rate", value=struct_pb2.Value(number_value=0.5) + ) + ) + proto.final_measurement.metrics.append( + study_pb2.Measurement.Metric(metric_id="loss", value=56.8) + ) + proto.final_measurement.metrics.append( + study_pb2.Measurement.Metric(metric_id="objective", value=77.7) + ) + proto.final_measurement.metrics.append( + study_pb2.Measurement.Metric(metric_id="objective2", value=-0.2) + ) + + pytrial = proto_converters.TrialConverter.from_proto(proto) + got = proto_converters.TrialConverter.to_proto(pytrial) + assert proto == got + + def test_measurement_back_to_back_conversion(self): + proto = study_pb2.Trial( + name=str(2), + id=str(2), + state=study_pb2.Trial.State.ACTIVE, + client_id="worker0", + ) + creation_secs = 1630505100 + start_time = timestamp_pb2.Timestamp( + seconds=int(creation_secs), + nanos=int(1e9 * (creation_secs - int(creation_secs))), + ) + setattr(proto, "start_time", start_time) + proto.measurements.append( + study_pb2.Measurement( + step_count=123, elapsed_duration=duration_pb2.Duration(seconds=22) + ) + ) + proto.measurements[-1].metrics.append( + study_pb2.Measurement.Metric(metric_id="objective", value=0.4321) + ) + proto.measurements[-1].metrics.append( + study_pb2.Measurement.Metric(metric_id="loss", value=0.001) + ) + + proto.measurements.append( + study_pb2.Measurement( + step_count=789, elapsed_duration=duration_pb2.Duration(seconds=55) + ) + ) + proto.measurements[-1].metrics.append( + study_pb2.Measurement.Metric(metric_id="objective", value=0.21) + ) + proto.measurements[-1].metrics.append( + study_pb2.Measurement.Metric(metric_id="loss", value=0.0001) + ) + + pytrial = proto_converters.TrialConverter.from_proto(proto) + got = proto_converters.TrialConverter.to_proto(pytrial) + assert proto == got + + 
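
These round-trip tests lean on the same assignment pattern introduced in proto_converters.py earlier in this diff: on the proto-plus compat types, message-valued fields such as start_time are assigned with setattr() because CopyFrom() on the wrapped field can raise AttributeError. A minimal sketch of the pattern, using only types this test module already imports (the timestamp value is illustrative):

from datetime import timezone

from google.cloud.aiplatform.compat.types import study as study_pb2
from google.protobuf import timestamp_pb2

proto = study_pb2.Trial(name="1", state=study_pb2.Trial.State.ACTIVE)

# Assign the whole Timestamp message rather than calling proto.start_time.CopyFrom(...).
setattr(proto, "start_time", timestamp_pb2.Timestamp(seconds=1586649600))

# Read it back the way TrialConverter.from_proto does.
creation_time = proto.start_time.timestamp_pb().ToDatetime().replace(tzinfo=timezone.utc)
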
+class TestParameterConfigConverterToProto: + def test_discrete_config_to_proto(self): + feasible_values = (-1, 3, 2) + parameter_config = pyvizier.ParameterConfig.factory( + "name", + feasible_values=feasible_values, + scale_type=pyvizier.ScaleType.LOG, + default_value=2, + ) + + proto = proto_converters.ParameterConfigConverter.to_proto(parameter_config) + assert proto.parameter_id == "name" + assert proto.discrete_value_spec.values == [-1.0, 2.0, 3.0] + assert proto.discrete_value_spec.default_value == 2 + assert ( + proto.scale_type + == study_pb2.StudySpec.ParameterSpec.ScaleType.UNIT_LOG_SCALE + ) + + +class TestParameterConfigConverterFromProto: + def test_creates_from_good_proto(self): + proto = study_pb2.StudySpec.ParameterSpec( + parameter_id="name", + discrete_value_spec=study_pb2.StudySpec.ParameterSpec.DiscreteValueSpec( + values=[1.0, 2.0, 3.0], default_value=2.0 + ), + ) + + parameter_config = proto_converters.ParameterConfigConverter.from_proto(proto) + + assert parameter_config.name == proto.parameter_id + assert parameter_config.type == pyvizier.ParameterType.DISCRETE + assert parameter_config.bounds == (1.0, 3.0) + assert parameter_config.feasible_values == [1.0, 2.0, 3.0] + assert parameter_config.default_value == 2.0 diff --git a/vertexai/_model_garden/_model_garden_models.py b/vertexai/_model_garden/_model_garden_models.py index 506bc23a39..86c44fb988 100644 --- a/vertexai/_model_garden/_model_garden_models.py +++ b/vertexai/_model_garden/_model_garden_models.py @@ -34,6 +34,13 @@ "text-bison": "https://us-kfp.pkg.dev/vertex-ai/large-language-model-pipelines/tune-large-model/sdk-1-25" } +_SDK_PRIVATE_PREVIEW_LAUNCH_STAGE = frozenset( + [ + gca_publisher_model.PublisherModel.LaunchStage.PRIVATE_PREVIEW, + gca_publisher_model.PublisherModel.LaunchStage.PUBLIC_PREVIEW, + gca_publisher_model.PublisherModel.LaunchStage.GA, + ] +) _SDK_PUBLIC_PREVIEW_LAUNCH_STAGE = frozenset( [ gca_publisher_model.PublisherModel.LaunchStage.PUBLIC_PREVIEW, diff --git a/vertexai/language_models/__init__.py b/vertexai/language_models/__init__.py index ecab1cf7f1..95f09e711d 100644 --- a/vertexai/language_models/__init__.py +++ b/vertexai/language_models/__init__.py @@ -15,6 +15,9 @@ """Classes for working with language models.""" from vertexai.language_models._language_models import ( + CodeChatModel, + CodeChatSession, + CodeGenerationModel, InputOutputTextPair, TextEmbedding, TextEmbeddingModel, @@ -23,6 +26,9 @@ ) __all__ = [ + "CodeChatModel", + "CodeChatSession", + "CodeGenerationModel", "InputOutputTextPair", "TextEmbedding", "TextEmbeddingModel", diff --git a/vertexai/language_models/_language_models.py b/vertexai/language_models/_language_models.py index 0d6a5bfebd..e6dd7b63b5 100644 --- a/vertexai/language_models/_language_models.py +++ b/vertexai/language_models/_language_models.py @@ -139,6 +139,7 @@ def tune_model( training_data: Union[str, "pandas.core.frame.DataFrame"], *, train_steps: int = 1000, + learning_rate: Optional[float] = None, tuning_job_location: Optional[str] = None, tuned_model_location: Optional[str] = None, model_display_name: Optional[str] = None, @@ -151,6 +152,7 @@ def tune_model( training_data: A Pandas DataFrame of a URI pointing to data in JSON lines format. The dataset must have the "input_text" and "output_text" columns. train_steps: Number of training steps to perform. + learning_rate: Learning rate for the tuning tuning_job_location: GCP location where the tuning job should be run. Only "europe-west4" is supported for now. 
tuned_model_location: GCP location where the tuned model should be deployed. Only "us-central1" is supported for now. model_display_name: Custom display name for the tuned model. @@ -184,6 +186,7 @@ def tune_model( model_id=model_info.tuning_model_id, tuning_pipeline_uri=model_info.tuning_pipeline_uri, model_display_name=model_display_name, + learning_rate=learning_rate, ) job = _LanguageModelTuningJob( @@ -665,6 +668,7 @@ class CodeChatModel(_ChatModelBase): """ _INSTANCE_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/predict/instance/codechat_generation_1.0.0.yaml" + _LAUNCH_STAGE = _model_garden_models._SDK_GA_LAUNCH_STAGE _DEFAULT_MAX_OUTPUT_TOKENS = 128 _DEFAULT_TEMPERATURE = 0.5 @@ -795,7 +799,8 @@ def send_message( ) prediction = prediction_response.predictions[0] - safety_attributes = prediction["safetyAttributes"] + # ! Note: For chat models, the safetyAttributes is a list. + safety_attributes = prediction["safetyAttributes"][0] response_obj = TextGenerationResponse( text=prediction["candidates"][0]["content"] if prediction.get("candidates") @@ -914,7 +919,7 @@ class CodeGenerationModel(_LanguageModel): _INSTANCE_SCHEMA_URI = "gs://google-cloud-aiplatform/schema/predict/instance/code_generation_1.0.0.yaml" - _LAUNCH_STAGE = _model_garden_models._SDK_PUBLIC_PREVIEW_LAUNCH_STAGE + _LAUNCH_STAGE = _model_garden_models._SDK_GA_LAUNCH_STAGE _DEFAULT_TEMPERATURE = 0.0 _DEFAULT_MAX_OUTPUT_TOKENS = 128 @@ -1041,6 +1046,7 @@ def _launch_tuning_job( tuning_pipeline_uri: str, train_steps: Optional[int] = None, model_display_name: Optional[str] = None, + learning_rate: Optional[float] = None, ) -> aiplatform.PipelineJob: output_dir_uri = _generate_tuned_model_dir_uri(model_id=model_id) if isinstance(training_data, str): @@ -1062,6 +1068,7 @@ def _launch_tuning_job( train_steps=train_steps, tuning_pipeline_uri=tuning_pipeline_uri, model_display_name=model_display_name, + learning_rate=learning_rate, ) return job @@ -1071,11 +1078,15 @@ def _launch_tuning_job_on_jsonl_data( dataset_name_or_uri: str, tuning_pipeline_uri: str, train_steps: Optional[int] = None, + learning_rate: Optional[float] = None, model_display_name: Optional[str] = None, ) -> aiplatform.PipelineJob: if not model_display_name: # Creating a human-readable model display name - name = f"{model_id} tuned for {train_steps} steps on " + name = f"{model_id} tuned for {train_steps} steps" + if learning_rate: + name += f" with learning rate {learning_rate}" + name += " on " # Truncating the start of the dataset URI to keep total length <= 128. max_display_name_length = 128 if len(dataset_name_or_uri + name) <= max_display_name_length: @@ -1095,6 +1106,8 @@ def _launch_tuning_job_on_jsonl_data( "large_model_reference": model_id, "model_display_name": model_display_name, } + if learning_rate: + pipeline_arguments["learning_rate"] = learning_rate if dataset_name_or_uri.startswith("projects/"): pipeline_arguments["dataset_name"] = dataset_name_or_uri diff --git a/vertexai/preview/language_models.py b/vertexai/preview/language_models.py index 43447a8e50..ae41214b10 100644 --- a/vertexai/preview/language_models.py +++ b/vertexai/preview/language_models.py @@ -17,6 +17,7 @@ from vertexai.language_models._language_models import ( _PreviewTextEmbeddingModel, _PreviewTextGenerationModel, + ChatMessage, ChatModel, ChatSession, CodeChatModel, @@ -31,6 +32,7 @@ TextEmbeddingModel = _PreviewTextEmbeddingModel __all__ = [ + "ChatMessage", "ChatModel", "ChatSession", "CodeChatModel",
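
Among the user-visible changes in this diff: the experiment-tracking samples now pass experiment= (not experiment_name=) to aiplatform.init, CodeGenerationModel and CodeChatModel are exported from the GA vertexai.language_models namespace, and tune_model accepts an optional learning_rate that is forwarded to the tuning pipeline as a parameter. A minimal end-to-end sketch, assuming tuning is driven through the preview TextGenerationModel as in the unit tests (project, bucket, experiment, and dataset names are placeholders):

from google.cloud import aiplatform
from vertexai.language_models import CodeGenerationModel
from vertexai.preview.language_models import TextGenerationModel

# init() takes `experiment=` in the updated samples.
aiplatform.init(project="my-project", location="us-central1", experiment="my-experiment")

# Code models are importable from the GA namespace after this change.
code_model = CodeGenerationModel.from_pretrained("google/code-bison@001")

# tune_model() gains an optional learning_rate, passed through as a pipeline parameter.
text_model = TextGenerationModel.from_pretrained("text-bison@001")
text_model.tune_model(
    training_data="gs://my-bucket/tuning_data.jsonl",  # placeholder JSONL dataset URI
    train_steps=10,
    learning_rate=0.1,
    tuning_job_location="europe-west4",
    tuned_model_location="us-central1",
)
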