ModuleNotFoundError: No module named 'azureml.training'


Hi,

I am trying the tutorial below, but I am running into a module-not-available error:
https://learn.microsoft.com/en-us/azure/synapse-analytics/machine-learning/tutorial-score-model-pred...

Code that results in the error:
#Bind model within Spark session
model = pcontext.bind_model(
    return_types=RETURN_TYPES,
    runtime=RUNTIME,
    model_alias="<random_alias_name>",  #This alias will be used in PREDICT call to refer this model
    model_uri=ADLS_MODEL_URI,           #In case of AML, it will be AML_MODEL_URI
    aml_workspace=ws                    #This is only for AML. In case of ADLS, this parameter can be removed
).register()
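
For context, the earlier cells in my notebook follow the tutorial and look roughly like this (the values below are placeholders, not my actual paths or workspace details):

#Import PREDICT libraries and enable PREDICT in this Spark session
from pyspark.sql.functions import col, pandas_udf, udf, lit
import azure.synapse.ml.predict as pcontext
spark.conf.set("spark.synapse.ml.predict.enabled", "true")

#Model was logged with MLflow; return type and runtime as described in the tutorial
RETURN_TYPES = "<return_types>"
RUNTIME = "mlflow"

#Model location - ADLS in my case (AML_MODEL_URI would be used instead for an AML-registered model)
ADLS_MODEL_URI = "abfss://<filesystem>@<account>.dfs.core.windows.net/<model_folder_path>"

#ws is only needed when the model is registered in AML (azureml.core Workspace object)
from azureml.core import Workspace
ws = Workspace(subscription_id="<sub_id>", resource_group="<rg>", workspace_name="<aml_workspace>")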

I am using the correct PySpark 3.2 and Python 3.8 versions. What could I be missing for the 'azureml.training' module?
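
In case it helps, this is the kind of sanity check I can run in a separate cell to confirm what the pool session actually provides (standard Python, not part of the tutorial):

#Print interpreter / library versions for the attached Spark pool session
import sys
import pyspark
import mlflow
print(sys.version)            #expecting Python 3.8.x
print(pyspark.__version__)    #expecting 3.2.x
print(mlflow.__version__)

#List any azureml-* distributions installed on the cluster
import pkg_resources
print(sorted(d.project_name for d in pkg_resources.working_set if "azureml" in d.project_name.lower()))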

---------------------------------------------------------------------------
ModuleNotFoundError                       Traceback (most recent call last)
/tmp/ipykernel_7333/1720277795.py in <module>
      1 #Bind model within Spark session
----> 2 model = pcontext.bind_model(
      3     return_types=RETURN_TYPES,
      4     runtime=RUNTIME,
      5     model_alias="predictbreakfixtickets-expmodel1", #This alias will be used in PREDICT call to refer this model

~/cluster-env/env/lib/python3.8/site-packages/azure/synapse/ml/predict/core/_context.py in bind_model(return_types, runtime, model_alias, model_uri, meta_data, functions, aml_workspace, num_threads, sparkml_model_type)
     45     num_threads = int(spark_session.conf.get("spark.task.cpus", "1"))
     46 
---> 47     udf = _create_udf(
     48         spark_session,
     49         return_types,

~/cluster-env/env/lib/python3.8/site-packages/azure/synapse/ml/predict/core/_udf.py in _create_udf(spark_session, return_types, runtime, model_alias, model_uri, meta_data, functions, aml_workspace, **kwargs)
    102 
    103     runtime_gen = RuntimeGenerator(runtime, model_alias, resolved_model_uri, meta_data, functions, **kwargs)
--> 104     model_runtime = runtime_gen._create_runtime()
    105 
    106     if model_runtime == "sparkml_runtime":

~/cluster-env/env/lib/python3.8/site-packages/azure/synapse/ml/predict/core/_runtime.py in _create_runtime(self)
    101         # Retrieve the model and check the compatibility - e.g. do not run sklearn model with an ONNX runtime
    102         # Will raise an exception if the model and runtime are not compatible
--> 103         if self._check_model_runtime_compatibility(model_runtime):
    104             return "sparkml_runtime"
    105 

~/cluster-env/env/lib/python3.8/site-packages/azure/synapse/ml/predict/core/_runtime.py in _check_model_runtime_compatibility(self, runtime)
    164         # and will make the workspace object unpickable (as of Nov 2020).
    165         # A better solution would be to pass around the AML credentials, maybe.
--> 166         model_wrapper = self._load()
    167 
    168         if runtime == ModelRuntime.MLFLOW or runtime == ModelRuntime.AUTO:

~/cluster-env/env/lib/python3.8/site-packages/azure/synapse/ml/predict/core/_runtime.py in _load(self)
     76 
     77     def _load(self):
---> 78         return SynapsePredictModelCache._get_or_load(
     79             self.runtime,
     80             self.model_alias,

~/cluster-env/env/lib/python3.8/site-packages/azure/synapse/ml/predict/core/_cache.py in _get_or_load(runtime, model_alias, model_uri, meta_data, functions)
    170         model_uri = SynapsePredictModelCache._get_cluster_model(model_uri)
    171 
--> 172         model = load_model(runtime, model_uri, functions)
    173         if model is None:
    174             raise RuntimeError("Load model failed")

~/cluster-env/env/lib/python3.8/site-packages/azure/synapse/ml/predict/utils/_model_loader.py in load_model(runtime, model_uri, functions)
    255         raise NotImplementedError("Runtime of model not provided.")
    256 
--> 257     model = loader.load(model_uri, functions)
    258     return model

~/cluster-env/env/lib/python3.8/site-packages/azure/synapse/ml/predict/utils/_model_loader.py in load(self, model_uri, functions)
    118             model = self._load_external(model_uri, load_function)
    119         elif is_mlflow_model(model_uri):
--> 120             model = self._load_mlflow(model_uri)
    121         else:
    122             model = self._load(model_uri)

~/cluster-env/env/lib/python3.8/site-packages/azure/synapse/ml/predict/utils/_model_loader.py in _load_mlflow(self, model_uri)
     57             Model object or None
     58         """
---> 59         model = mlflow.pyfunc.load_model(model_uri)
     60 
     61         return model

~/cluster-env/env/lib/python3.8/site-packages/mlflow/pyfunc/__init__.py in load_model(model_uri, suppress_warnings)
    653         mlflow.pyfunc.utils._add_code_to_system_path(code_path=code_path)
    654     data_path = os.path.join(local_path, conf[DATA]) if (DATA in conf) else local_path
--> 655     model_impl = importlib.import_module(conf[MAIN])._load_pyfunc(data_path)
    656     return PyFuncModel(model_meta=model_meta, model_impl=model_impl)
    657 

~/cluster-env/env/lib/python3.8/site-packages/mlflow/sklearn/__init__.py in _load_pyfunc(path)
    391         path = os.path.join(path, pyfunc_flavor_conf["model_path"])
    392 
--> 393     return _load_model_from_local_file(path=path, serialization_format=serialization_format)
    394 
    395 

~/cluster-env/env/lib/python3.8/site-packages/mlflow/sklearn/__init__.py in _load_model_from_local_file(path, serialization_format)
    345     # That's why we check the serialization format of the model before deserializing
    346     if serialization_format == SERIALIZATION_FORMAT_PICKLE:
--> 347         return pickle.load(f)
    348     elif serialization_format == SERIALIZATION_FORMAT_CLOUDPICKLE:
    349         import cloudpickle

ModuleNotFoundError: No module named 'azureml.training'

